233 files changed, 14474 insertions, 4689 deletions
diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst index 15dcd62fd22f..249b54d0849f 100644 --- a/Documentation/crypto/architecture.rst +++ b/Documentation/crypto/architecture.rst @@ -196,8 +196,6 @@ the aforementioned cipher types: - CRYPTO_ALG_TYPE_CIPHER Single block cipher -- CRYPTO_ALG_TYPE_COMPRESS Compression - - CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with Associated Data (MAC) diff --git a/Documentation/crypto/index.rst b/Documentation/crypto/index.rst index 92eec78b5713..100b47d049c0 100644 --- a/Documentation/crypto/index.rst +++ b/Documentation/crypto/index.rst @@ -26,3 +26,4 @@ for cryptographic use cases, as well as programming examples. api-samples descore-readme device_drivers/index + krb5 diff --git a/Documentation/crypto/krb5.rst b/Documentation/crypto/krb5.rst new file mode 100644 index 000000000000..beffa0133446 --- /dev/null +++ b/Documentation/crypto/krb5.rst @@ -0,0 +1,262 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================== +Kerberos V Cryptography API +=========================== + +.. Contents: + + - Overview. + - Small Buffer. + - Encoding Type. + - Key Derivation. + - PRF+ Calculation. + - Kc, Ke And Ki Derivation. + - Crypto Functions. + - Preparation Functions. + - Encryption Mode. + - Checksum Mode. + - The krb5enc AEAD algorithm + +Overview +======== + +This API provides Kerberos 5-style cryptography for key derivation, encryption +and checksumming for use in network filesystems and can be used to implement +the low-level crypto that's needed for GSSAPI. + +The following crypto types are supported:: + + KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 + KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 + KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128 + KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 + KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC + KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC + + KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 + KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 + KRB5_CKSUMTYPE_CMAC_CAMELLIA128 + KRB5_CKSUMTYPE_CMAC_CAMELLIA256 + KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 + KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 + +The API can be included by:: + + #include <crypto/krb5.h> + +Small Buffer +------------ + +To pass small pieces of data about, such as keys, a buffer structure is +defined, giving a pointer to the data and the size of that data:: + + struct krb5_buffer { + unsigned int len; + void *data; + }; + +Encoding Type +============= + +The encoding type is defined by the following structure:: + + struct krb5_enctype { + int etype; + int ctype; + const char *name; + u16 key_bytes; + u16 key_len; + u16 Kc_len; + u16 Ke_len; + u16 Ki_len; + u16 prf_len; + u16 block_len; + u16 conf_len; + u16 cksum_len; + ... + }; + +The fields of interest to the user of the API are as follows: + + * ``etype`` and ``ctype`` indicate the protocol number for this encoding + type for encryption and checksumming respectively. They hold + ``KRB5_ENCTYPE_*`` and ``KRB5_CKSUMTYPE_*`` constants. + + * ``name`` is the formal name of the encoding. + + * ``key_len`` and ``key_bytes`` are the input key length and the derived key + length. (I think they only differ for DES, which isn't supported here). + + * ``Kc_len``, ``Ke_len`` and ``Ki_len`` are the sizes of the derived Kc, Ke + and Ki keys. Kc is used in checksum mode; Ke and Ki are used in + encryption mode. + + * ``prf_len`` is the size of the result from the PRF+ function calculation. + + * ``block_len``, ``conf_len`` and ``cksum_len`` are the encryption block + length, confounder length and checksum length respectively.
All three are + used in encryption mode, but only the checksum length is used in checksum + mode. + +The encoding type is looked up by number using the following function:: + + const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype); + +Key Derivation +============== + +Once the application has selected an encryption type, the keys that will be +used to do the actual crypto can be derived from the transport key. + +PRF+ Calculation +---------------- + +To aid in key derivation, a function to calculate the Kerberos GSSAPI +mechanism's PRF+ is provided:: + + int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5, + const struct krb5_buffer *K, + unsigned int L, + const struct krb5_buffer *S, + struct krb5_buffer *result, + gfp_t gfp); + +This can be used to derive the transport key from a source key plus additional +data to limit its use. + +Crypto Functions +================ + +Once the keys have been derived, crypto can be performed on the data. The +caller must leave gaps in the buffer for the storage of the confounder (if +needed) and the checksum when preparing a message for transmission. An enum +and a pair of functions are provided to aid in this:: + + enum krb5_crypto_mode { + KRB5_CHECKSUM_MODE, + KRB5_ENCRYPT_MODE, + }; + + size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t data_size, size_t *_offset); + + size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_buffer_size, size_t *_offset); + +Both of these functions take the encoding type and an indication of the mode of +crypto (checksum-only or full encryption). + +The first function returns how big the buffer will need to be to house a given +amount of data; the second function returns how much data will fit in a buffer +of a particular size, and adjusts down the size of the required buffer +accordingly. In both cases, the offset of the data within the buffer is also +returned. + +When a message has been received, the location and size of the data within the +message can be determined by calling:: + + void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_offset, size_t *_len); + +The caller provides the offset and length of the message to the function, which +then alters those values to indicate the region containing the data (plus any +padding). It is up to the caller to determine how much padding there is. + +Preparation Functions +--------------------- + +Two functions are provided to allocate and prepare a crypto object for use by +the action functions:: + + struct crypto_aead * + crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); + struct crypto_shash * + crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); + +Both of these functions take the encoding type, the transport key and the usage +value used to derive the appropriate subkey(s). They create an appropriate +crypto object, an AEAD template for encryption and a synchronous hash for +checksumming, set the key(s) on it and configure it. The caller is expected to +pass these handles to the action functions below.
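Tying the above together, a minimal sketch of sizing a transmission buffer and preparing the encryption object. This is not code from the tree: the helper name ``setup_tx_crypto()``, its parameters, its error handling and the choice of enctype are all invented for illustration::

	#include <linux/err.h>
	#include <linux/gfp.h>
	#include <crypto/krb5.h>

	static struct crypto_aead *
	setup_tx_crypto(const struct krb5_buffer *TK, u32 usage,
			size_t data_size, size_t *_buf_size, size_t *_offset)
	{
		const struct krb5_enctype *krb5;

		/* Look the encoding type up by its protocol number. */
		krb5 = crypto_krb5_find_enctype(KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
		if (!krb5)
			return ERR_PTR(-ENOPKG);

		/* The caller would allocate *_buf_size bytes and place
		 * data_size bytes of payload at *_offset, leaving the gaps
		 * for the confounder and the checksum.
		 */
		*_buf_size = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE,
							 data_size, _offset);

		/* Derive Ke and Ki from the transport key for this usage and
		 * get back an AEAD handle configured with them.
		 */
		return crypto_krb5_prepare_encryption(krb5, TK, usage, GFP_KERNEL);
	}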
+ +Encryption Mode +--------------- + +A pair of functions are provided to encrypt and decrypt a message:: + + ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); + int crypto_krb5_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + +In both cases, the input and output buffers are indicated by the same +scatterlist. + +For the encryption function, the output buffer may be larger than is needed +(the amount of output generated is returned) and the location and size of the +data are indicated (which must match the encoding). If the caller hasn't +already inserted a confounder (as indicated by ``preconfounded``), the function +will insert one. + +For the decryption function, the offset and length of the message in the buffer +are supplied and these are shrunk to fit the data. The decryption function will +verify any checksums within the message and give an error if they don't match. + +Checksum Mode +------------- + +A pair of functions are provided to generate the checksum on a message and to +verify that checksum:: + + ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len); + int crypto_krb5_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + +In both cases, the input and output buffers are indicated by the same +scatterlist. Additional metadata can be passed in which will get added to the +hash before the data. + +For the get_mic function, the output buffer may be larger than is needed (the +amount of output generated is returned) and the location and size of the data +are indicated (which must match the encoding). + +For the verification function, the offset and length of the message in the +buffer are supplied and these are shrunk to fit the data. An error will be +returned if the checksums don't match. + +The krb5enc AEAD algorithm +========================== + +A template AEAD crypto algorithm, called "krb5enc", is provided that hashes the +plaintext before encrypting it (the reverse of authenc). The handle returned +by ``crypto_krb5_prepare_encryption()`` may be one of these, but there's no +requirement for the user of this API to interact with it directly. + +For reference, its key format begins with a BE32 of the format number. Only +format 1 is provided and that continues with a BE32 of the Ke key length +followed by a BE32 of the Ki key length, followed by the bytes from the Ke key +and then the Ki key. + +Using specifically ordered (big-endian) words means that the static test data +doesn't require byteswapping. diff --git a/Documentation/devicetree/bindings/crypto/fsl,sec2.0.yaml b/Documentation/devicetree/bindings/crypto/fsl,sec2.0.yaml new file mode 100644 index 000000000000..2091b89bb726 --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/fsl,sec2.0.yaml @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/fsl,sec2.0.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale SoC SEC Security Engines versions 1.x-2.x-3.x + +maintainers: + - J.
Neuschäfer <j.ne@posteo.net> + +properties: + compatible: + description: + Should contain entries for this and backward compatible SEC versions, + high to low. Warning - SEC1 and SEC2 are mutually exclusive. + oneOf: + - items: + - const: fsl,sec3.3 + - const: fsl,sec3.1 + - const: fsl,sec3.0 + - const: fsl,sec2.4 + - const: fsl,sec2.2 + - const: fsl,sec2.1 + - const: fsl,sec2.0 + - items: + - const: fsl,sec3.1 + - const: fsl,sec3.0 + - const: fsl,sec2.4 + - const: fsl,sec2.2 + - const: fsl,sec2.1 + - const: fsl,sec2.0 + - items: + - const: fsl,sec3.0 + - const: fsl,sec2.4 + - const: fsl,sec2.2 + - const: fsl,sec2.1 + - const: fsl,sec2.0 + - items: + - const: fsl,sec2.4 + - const: fsl,sec2.2 + - const: fsl,sec2.1 + - const: fsl,sec2.0 + - items: + - const: fsl,sec2.2 + - const: fsl,sec2.1 + - const: fsl,sec2.0 + - items: + - const: fsl,sec2.1 + - const: fsl,sec2.0 + - items: + - const: fsl,sec2.0 + - items: + - const: fsl,sec1.2 + - const: fsl,sec1.0 + - items: + - const: fsl,sec1.0 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + fsl,num-channels: + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [ 1, 4 ] + description: An integer representing the number of channels available. + + fsl,channel-fifo-len: + $ref: /schemas/types.yaml#/definitions/uint32 + maximum: 100 + description: + An integer representing the number of descriptor pointers each channel + fetch fifo can hold. + + fsl,exec-units-mask: + $ref: /schemas/types.yaml#/definitions/uint32 + maximum: 0xfff + description: | + The bitmask representing what execution units (EUs) are available. + EU information should be encoded following the SEC's Descriptor Header + Dword EU_SEL0 field documentation, i.e. as follows: + + bit 0 = reserved - should be 0 + bit 1 = set if SEC has the ARC4 EU (AFEU) + bit 2 = set if SEC has the DES/3DES EU (DEU) + bit 3 = set if SEC has the message digest EU (MDEU/MDEU-A) + bit 4 = set if SEC has the random number generator EU (RNG) + bit 5 = set if SEC has the public key EU (PKEU) + bit 6 = set if SEC has the AES EU (AESU) + bit 7 = set if SEC has the Kasumi EU (KEU) + bit 8 = set if SEC has the CRC EU (CRCU) + bit 11 = set if SEC has the message digest EU extended alg set (MDEU-B) + + remaining bits are reserved for future SEC EUs. + + fsl,descriptor-types-mask: + $ref: /schemas/types.yaml#/definitions/uint32 + description: | + The bitmask representing what descriptors are available. Descriptor type + information should be encoded following the SEC's Descriptor Header Dword + DESC_TYPE field documentation, i.e. as follows: + + bit 0 = SEC supports descriptor type aesu_ctr_nonsnoop + bit 1 = SEC supports descriptor type ipsec_esp + bit 2 = SEC supports descriptor type common_nonsnoop + bit 3 = SEC supports descriptor type 802.11i AES ccmp + bit 4 = SEC supports descriptor type hmac_snoop_no_afeu + bit 5 = SEC supports descriptor type srtp + bit 6 = SEC supports descriptor type non_hmac_snoop_no_afeu + bit 7 = SEC supports descriptor type pkeu_assemble + bit 8 = SEC supports descriptor type aesu_key_expand_output + bit 9 = SEC supports descriptor type pkeu_ptmul + bit 10 = SEC supports descriptor type common_nonsnoop_afeu + bit 11 = SEC supports descriptor type pkeu_ptadd_dbl + + ..and so on and so forth. 
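As an aside on the bit encoding above: the mask is simply an OR of single-bit flags, so the value 0xfe used in the MPC8548E example later in this binding decodes as bits 1-7 set (AFEU, DEU, MDEU, RNG, PKEU, AESU and KEU present; no CRCU or MDEU-B). A hedged C sketch, with flag names invented here for illustration rather than taken from any kernel header::

	#include <linux/bits.h>

	/* Hypothetical names for the EU_SEL0 bit positions listed above. */
	#define SEC_EU_AFEU	BIT(1)	/* ARC4 */
	#define SEC_EU_DEU	BIT(2)	/* DES/3DES */
	#define SEC_EU_MDEU	BIT(3)	/* message digest */
	#define SEC_EU_RNG	BIT(4)	/* random number generator */
	#define SEC_EU_PKEU	BIT(5)	/* public key */
	#define SEC_EU_AESU	BIT(6)	/* AES */
	#define SEC_EU_KEU	BIT(7)	/* Kasumi */

	/* ORs to 0xfe, matching the example's fsl,exec-units-mask. */
	#define MPC8548E_EU_MASK (SEC_EU_AFEU | SEC_EU_DEU | SEC_EU_MDEU | \
				  SEC_EU_RNG | SEC_EU_PKEU | SEC_EU_AESU | \
				  SEC_EU_KEU)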
+ +required: + - compatible + - reg + - fsl,num-channels + - fsl,channel-fifo-len + - fsl,exec-units-mask + - fsl,descriptor-types-mask + +unevaluatedProperties: false + +examples: + - | + /* MPC8548E */ + crypto@30000 { + compatible = "fsl,sec2.1", "fsl,sec2.0"; + reg = <0x30000 0x10000>; + interrupts = <29 2>; + interrupt-parent = <&mpic>; + fsl,num-channels = <4>; + fsl,channel-fifo-len = <24>; + fsl,exec-units-mask = <0xfe>; + fsl,descriptor-types-mask = <0x12b0ebf>; + }; + +... diff --git a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt deleted file mode 100644 index 125f155d00d0..000000000000 --- a/Documentation/devicetree/bindings/crypto/fsl-sec2.txt +++ /dev/null @@ -1,65 +0,0 @@ -Freescale SoC SEC Security Engines versions 1.x-2.x-3.x - -Required properties: - -- compatible : Should contain entries for this and backward compatible - SEC versions, high to low, e.g., "fsl,sec2.1", "fsl,sec2.0" (SEC2/3) - e.g., "fsl,sec1.2", "fsl,sec1.0" (SEC1) - warning: SEC1 and SEC2 are mutually exclusive -- reg : Offset and length of the register set for the device -- interrupts : the SEC's interrupt number -- fsl,num-channels : An integer representing the number of channels - available. -- fsl,channel-fifo-len : An integer representing the number of - descriptor pointers each channel fetch fifo can hold. -- fsl,exec-units-mask : The bitmask representing what execution units - (EUs) are available. It's a single 32-bit cell. EU information - should be encoded following the SEC's Descriptor Header Dword - EU_SEL0 field documentation, i.e. as follows: - - bit 0 = reserved - should be 0 - bit 1 = set if SEC has the ARC4 EU (AFEU) - bit 2 = set if SEC has the DES/3DES EU (DEU) - bit 3 = set if SEC has the message digest EU (MDEU/MDEU-A) - bit 4 = set if SEC has the random number generator EU (RNG) - bit 5 = set if SEC has the public key EU (PKEU) - bit 6 = set if SEC has the AES EU (AESU) - bit 7 = set if SEC has the Kasumi EU (KEU) - bit 8 = set if SEC has the CRC EU (CRCU) - bit 11 = set if SEC has the message digest EU extended alg set (MDEU-B) - -remaining bits are reserved for future SEC EUs. - -- fsl,descriptor-types-mask : The bitmask representing what descriptors - are available. It's a single 32-bit cell. Descriptor type information - should be encoded following the SEC's Descriptor Header Dword DESC_TYPE - field documentation, i.e. as follows: - - bit 0 = set if SEC supports the aesu_ctr_nonsnoop desc. type - bit 1 = set if SEC supports the ipsec_esp descriptor type - bit 2 = set if SEC supports the common_nonsnoop desc. type - bit 3 = set if SEC supports the 802.11i AES ccmp desc. type - bit 4 = set if SEC supports the hmac_snoop_no_afeu desc. type - bit 5 = set if SEC supports the srtp descriptor type - bit 6 = set if SEC supports the non_hmac_snoop_no_afeu desc.type - bit 7 = set if SEC supports the pkeu_assemble descriptor type - bit 8 = set if SEC supports the aesu_key_expand_output desc.type - bit 9 = set if SEC supports the pkeu_ptmul descriptor type - bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type - bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type - - ..and so on and so forth. 
- -Example: - - /* MPC8548E */ - crypto@30000 { - compatible = "fsl,sec2.1", "fsl,sec2.0"; - reg = <0x30000 0x10000>; - interrupts = <29 2>; - interrupt-parent = <&mpic>; - fsl,num-channels = <4>; - fsl,channel-fifo-len = <24>; - fsl,exec-units-mask = <0xfe>; - fsl,descriptor-types-mask = <0x12b0ebf>; - }; diff --git a/Documentation/devicetree/bindings/crypto/inside-secure,safexcel-eip93.yaml b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel-eip93.yaml new file mode 100644 index 000000000000..997bf9717f9e --- /dev/null +++ b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel-eip93.yaml @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/crypto/inside-secure,safexcel-eip93.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Inside Secure SafeXcel EIP-93 cryptographic engine + +maintainers: + - Christian Marangi <ansuelsmth@gmail.com> + +description: | + The Inside Secure SafeXcel EIP-93 is a cryptographic engine IP block + integrated in various devices under very different and generic names, from + PKTE to simply vendor+EIP93. The real IP under the hood is actually + developed by Inside Secure and licensed to vendors. + + The IP block is sold in different models based on which features are + needed, identified by the final letter(s). Each letter corresponds to a + specific feature set, and multiple letters reflect the sum of those + feature sets. + + EIP-93 models: + - EIP-93i: (basic) DES/Triple DES, AES, PRNG, IPsec ESP, SRTP, SHA1 + - EIP-93ie: i + SHA224/256, AES-192/256 + - EIP-93is: i + SSL/TLS/DTLS, MD5, ARC4 + - EIP-93ies: i + e + s + - EIP-93iw: i + AES-XCB-MAC, AES-CCM + +properties: + compatible: + oneOf: + - items: + - const: airoha,en7581-eip93 + - const: inside-secure,safexcel-eip93ies + - items: + - not: {} + description: Need a SoC specific compatible + - enum: + - inside-secure,safexcel-eip93i + - inside-secure,safexcel-eip93ie + - inside-secure,safexcel-eip93is + - inside-secure,safexcel-eip93iw + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + + crypto@1e004000 { + compatible = "airoha,en7581-eip93", "inside-secure,safexcel-eip93ies"; + reg = <0x1e004000 0x1000>; + + interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>; + }; diff --git a/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml index ef07258d16c1..343e2d04c797 100644 --- a/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml +++ b/Documentation/devicetree/bindings/crypto/inside-secure,safexcel.yaml @@ -47,6 +47,8 @@ properties: - const: core - const: reg + dma-coherent: true + required: - reg - interrupts diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml index 5e6f8b642545..ed7e16bd11d3 100644 --- a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml @@ -20,6 +20,7 @@ properties: - qcom,ipq5332-trng - qcom,ipq5424-trng - qcom,ipq9574-trng + - qcom,qcs615-trng + - qcom,qcs8300-trng - qcom,sa8255p-trng - qcom,sa8775p-trng diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml index 3ed56d9d378e..3f35122f7873 100644 ---
a/Documentation/devicetree/bindings/crypto/qcom-qce.yaml +++ b/Documentation/devicetree/bindings/crypto/qcom-qce.yaml @@ -55,6 +55,7 @@ properties: - qcom,sm8550-qce - qcom,sm8650-qce - qcom,sm8750-qce + - qcom,x1e80100-qce - const: qcom,sm8150-qce - const: qcom,qce diff --git a/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml b/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml new file mode 100644 index 000000000000..ca71b400bcae --- /dev/null +++ b/Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/rng/rockchip,rk3588-rng.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Rockchip RK3588 TRNG + +description: True Random Number Generator on Rockchip RK3588 SoC + +maintainers: + - Nicolas Frattaroli <nicolas.frattaroli@collabora.com> + +properties: + compatible: + enum: + - rockchip,rk3588-rng + + reg: + maxItems: 1 + + clocks: + items: + - description: TRNG AHB clock + + interrupts: + maxItems: 1 + + resets: + maxItems: 1 + +required: + - compatible + - reg + - clocks + - interrupts + +additionalProperties: false + +examples: + - | + #include <dt-bindings/clock/rockchip,rk3588-cru.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/interrupt-controller/irq.h> + #include <dt-bindings/reset/rockchip,rk3588-cru.h> + bus { + #address-cells = <2>; + #size-cells = <2>; + + rng@fe378000 { + compatible = "rockchip,rk3588-rng"; + reg = <0x0 0xfe378000 0x0 0x200>; + interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH 0>; + clocks = <&scmi_clk SCMI_HCLK_SECURE_NS>; + resets = <&scmi_reset SCMI_SRST_H_TRNG_NS>; + }; + }; + +... diff --git a/MAINTAINERS b/MAINTAINERS index e23215986c60..3347bd1dd615 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3610,14 +3610,42 @@ F: drivers/hwmon/asus_wmi_sensors.c ASYMMETRIC KEYS M: David Howells <dhowells@redhat.com> +M: Lukas Wunner <lukas@wunner.de> +M: Ignat Korchagin <ignat@cloudflare.com> L: keyrings@vger.kernel.org +L: linux-crypto@vger.kernel.org S: Maintained F: Documentation/crypto/asymmetric-keys.rst F: crypto/asymmetric_keys/ F: include/crypto/pkcs7.h F: include/crypto/public_key.h +F: include/keys/asymmetric-*.h F: include/linux/verification.h +ASYMMETRIC KEYS - ECDSA +M: Lukas Wunner <lukas@wunner.de> +M: Ignat Korchagin <ignat@cloudflare.com> +R: Stefan Berger <stefanb@linux.ibm.com> +L: linux-crypto@vger.kernel.org +S: Maintained +F: crypto/ecc* +F: crypto/ecdsa* +F: include/crypto/ecc* + +ASYMMETRIC KEYS - GOST +M: Lukas Wunner <lukas@wunner.de> +M: Ignat Korchagin <ignat@cloudflare.com> +L: linux-crypto@vger.kernel.org +S: Odd fixes +F: crypto/ecrdsa* + +ASYMMETRIC KEYS - RSA +M: Lukas Wunner <lukas@wunner.de> +M: Ignat Korchagin <ignat@cloudflare.com> +L: linux-crypto@vger.kernel.org +S: Maintained +F: crypto/rsa* + ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API R: Dan Williams <dan.j.williams@intel.com> S: Odd fixes @@ -11599,6 +11627,13 @@ L: linux-crypto@vger.kernel.org S: Maintained F: drivers/crypto/inside-secure/ +INSIDE SECURE EIP93 CRYPTO DRIVER +M: Christian Marangi <ansuelsmth@gmail.com> +L: linux-crypto@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/crypto/inside-secure,safexcel-eip93.yaml +F: drivers/crypto/inside-secure/eip93/ + INTEGRITY MEASUREMENT ARCHITECTURE (IMA) M: Mimi Zohar <zohar@linux.ibm.com> M: Roberto Sassu <roberto.sassu@huawei.com> @@ -11802,6 +11837,7 @@ F: drivers/dma/ioat* INTEL IAA 
CRYPTO DRIVER M: Kristen Accardi <kristen.c.accardi@intel.com> +M: Vinicius Costa Gomes <vinicius.gomes@intel.com> L: linux-crypto@vger.kernel.org S: Supported F: Documentation/driver-api/crypto/iaa/iaa-crypto.rst @@ -20675,8 +20711,10 @@ F: include/uapi/linux/rkisp1-config.h ROCKCHIP RK3568 RANDOM NUMBER GENERATOR SUPPORT M: Daniel Golle <daniel@makrotopia.org> M: Aurelien Jarno <aurelien@aurel32.net> +M: Nicolas Frattaroli <nicolas.frattaroli@collabora.com> S: Maintained F: Documentation/devicetree/bindings/rng/rockchip,rk3568-rng.yaml +F: Documentation/devicetree/bindings/rng/rockchip,rk3588-rng.yaml F: drivers/char/hw_random/rockchip-rng.c ROCKCHIP RASTER 2D GRAPHIC ACCELERATION UNIT DRIVER @@ -26493,6 +26531,7 @@ F: mm/zsmalloc.c ZSTD M: Nick Terrell <terrelln@fb.com> +M: David Sterba <dsterba@suse.com> S: Maintained B: https://github.com/facebook/zstd/issues T: git https://github.com/terrelln/linux.git diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig index 32650c8431d9..23e4ea067ddb 100644 --- a/arch/arm/crypto/Kconfig +++ b/arch/arm/crypto/Kconfig @@ -3,10 +3,12 @@ menu "Accelerated Cryptographic Algorithms for CPU (arm)" config CRYPTO_CURVE25519_NEON - tristate "Public key crypto: Curve25519 (NEON)" + tristate depends on KERNEL_MODE_NEON + select CRYPTO_KPP select CRYPTO_LIB_CURVE25519_GENERIC select CRYPTO_ARCH_HAVE_LIB_CURVE25519 + default CRYPTO_LIB_CURVE25519_INTERNAL help Curve25519 algorithm @@ -45,9 +47,10 @@ config CRYPTO_NHPOLY1305_NEON - NEON (Advanced SIMD) extensions config CRYPTO_POLY1305_ARM - tristate "Hash functions: Poly1305 (NEON)" + tristate select CRYPTO_HASH select CRYPTO_ARCH_HAVE_LIB_POLY1305 + default CRYPTO_LIB_POLY1305_INTERNAL help Poly1305 authenticator algorithm (RFC7539) @@ -212,9 +215,10 @@ config CRYPTO_AES_ARM_CE - ARMv8 Crypto Extensions config CRYPTO_CHACHA20_NEON - tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (NEON)" + tristate select CRYPTO_SKCIPHER select CRYPTO_ARCH_HAVE_LIB_CHACHA + default CRYPTO_LIB_CHACHA_INTERNAL help Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c index 21df5e7f51f9..1cf61f51e766 100644 --- a/arch/arm/crypto/aes-ce-glue.c +++ b/arch/arm/crypto/aes-ce-glue.c @@ -399,9 +399,9 @@ static int ctr_encrypt(struct skcipher_request *req) } if (walk.nbytes) { u8 __aligned(8) tail[AES_BLOCK_SIZE]; + const u8 *tsrc = walk.src.virt.addr; unsigned int nbytes = walk.nbytes; u8 *tdst = walk.dst.virt.addr; - u8 *tsrc = walk.src.virt.addr; /* * Tell aes_ctr_encrypt() to process a tail block. 
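Several of the hunks below (arm/arm64 ghash-ce, aes-ce-ccm, sm4-ce, s390 aes_s390, x86 aegis128) make the same conversion: the old open-coded scatterlist walk of scatterwalk_clamp(), a manual sg_next() restart, scatterwalk_map()/scatterwalk_unmap(), scatterwalk_advance() and scatterwalk_done() collapses into the scatterwalk_next()/scatterwalk_done_src() pair. A minimal sketch of the resulting pattern, based on the usage visible in those hunks; process() is a hypothetical consumer, not a function from the tree::

	#include <crypto/scatterwalk.h>

	static void consume_src(struct scatterlist *src, unsigned int len)
	{
		struct scatter_walk walk;

		scatterwalk_start(&walk, src);
		do {
			/* Map the next contiguous chunk; walk.addr points at
			 * it and n is its length (n <= len).
			 */
			unsigned int n = scatterwalk_next(&walk, len);

			process(walk.addr, n);

			/* Unmap and advance in one call; the _src variant
			 * records that the chunk was only read, so nothing
			 * needs flushing back to the page.
			 */
			scatterwalk_done_src(&walk, n);
			len -= n;
		} while (len);
	}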
diff --git a/arch/arm/crypto/chacha-glue.c b/arch/arm/crypto/chacha-glue.c index cdde8fd01f8f..50e635512046 100644 --- a/arch/arm/crypto/chacha-glue.c +++ b/arch/arm/crypto/chacha-glue.c @@ -76,12 +76,6 @@ void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) } EXPORT_SYMBOL(hchacha_block_arch); -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -{ - chacha_init_generic(state, key, iv); -} -EXPORT_SYMBOL(chacha_init_arch); - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { @@ -116,7 +110,7 @@ static int chacha_stream_xor(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); - chacha_init_generic(state, ctx->key, iv); + chacha_init(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -166,7 +160,7 @@ static int do_xchacha(struct skcipher_request *req, bool neon) u32 state[16]; u8 real_iv[16]; - chacha_init_generic(state, ctx->key, req->iv); + chacha_init(state, ctx->key, req->iv); if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) { hchacha_block_arm(state, subctx.key, ctx->nrounds); diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c index 3af997082534..aabfcf522a2c 100644 --- a/arch/arm/crypto/ghash-ce-glue.c +++ b/arch/arm/crypto/ghash-ce-glue.c @@ -55,10 +55,6 @@ struct ghash_desc_ctx { u32 count; }; -struct ghash_async_ctx { - struct cryptd_ahash *cryptd_tfm; -}; - asmlinkage void pmull_ghash_update_p64(int blocks, u64 dg[], const char *src, u64 const h[][2], const char *head); @@ -78,34 +74,12 @@ static int ghash_init(struct shash_desc *desc) static void ghash_do_update(int blocks, u64 dg[], const char *src, struct ghash_key *key, const char *head) { - if (likely(crypto_simd_usable())) { - kernel_neon_begin(); - if (static_branch_likely(&use_p64)) - pmull_ghash_update_p64(blocks, dg, src, key->h, head); - else - pmull_ghash_update_p8(blocks, dg, src, key->h, head); - kernel_neon_end(); - } else { - be128 dst = { cpu_to_be64(dg[1]), cpu_to_be64(dg[0]) }; - - do { - const u8 *in = src; - - if (head) { - in = head; - blocks++; - head = NULL; - } else { - src += GHASH_BLOCK_SIZE; - } - - crypto_xor((u8 *)&dst, in, GHASH_BLOCK_SIZE); - gf128mul_lle(&dst, &key->k); - } while (--blocks); - - dg[0] = be64_to_cpu(dst.b); - dg[1] = be64_to_cpu(dst.a); - } + kernel_neon_begin(); + if (static_branch_likely(&use_p64)) + pmull_ghash_update_p64(blocks, dg, src, key->h, head); + else + pmull_ghash_update_p8(blocks, dg, src, key->h, head); + kernel_neon_end(); } static int ghash_update(struct shash_desc *desc, const u8 *src, @@ -206,162 +180,13 @@ static struct shash_alg ghash_alg = { .descsize = sizeof(struct ghash_desc_ctx), .base.cra_name = "ghash", - .base.cra_driver_name = "ghash-ce-sync", - .base.cra_priority = 300 - 1, + .base.cra_driver_name = "ghash-ce", + .base.cra_priority = 300, .base.cra_blocksize = GHASH_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ghash_key) + sizeof(u64[2]), .base.cra_module = THIS_MODULE, }; -static int ghash_async_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); - - desc->tfm = child; - return crypto_shash_init(desc); -} - -static int ghash_async_update(struct ahash_request *req) -{ - 
struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); - return crypto_ahash_update(cryptd_req); - } else { - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - return shash_ahash_update(req, desc); - } -} - -static int ghash_async_final(struct ahash_request *req) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); - return crypto_ahash_final(cryptd_req); - } else { - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - return crypto_shash_final(desc, req->result); - } -} - -static int ghash_async_digest(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; - - if (!crypto_simd_usable() || - (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); - return crypto_ahash_digest(cryptd_req); - } else { - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); - - desc->tfm = child; - return shash_ahash_digest(req, desc); - } -} - -static int ghash_async_import(struct ahash_request *req, const void *in) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - - desc->tfm = cryptd_ahash_child(ctx->cryptd_tfm); - - return crypto_shash_import(desc, in); -} - -static int ghash_async_export(struct ahash_request *req, void *out) -{ - struct ahash_request *cryptd_req = ahash_request_ctx(req); - struct shash_desc *desc = cryptd_shash_desc(cryptd_req); - - return crypto_shash_export(desc, out); -} - -static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, - unsigned int keylen) -{ - struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); - struct crypto_ahash *child = &ctx->cryptd_tfm->base; - - crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); - crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) - & CRYPTO_TFM_REQ_MASK); - return crypto_ahash_setkey(child, key, keylen); -} - -static int ghash_async_init_tfm(struct crypto_tfm *tfm) -{ - struct cryptd_ahash *cryptd_tfm; - struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); - - cryptd_tfm = cryptd_alloc_ahash("ghash-ce-sync", 0, 0); - if (IS_ERR(cryptd_tfm)) - return PTR_ERR(cryptd_tfm); - ctx->cryptd_tfm = cryptd_tfm; - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct ahash_request) + - crypto_ahash_reqsize(&cryptd_tfm->base)); - - return 0; -} - -static void ghash_async_exit_tfm(struct crypto_tfm *tfm) -{ - struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); - - 
cryptd_free_ahash(ctx->cryptd_tfm); -} - -static struct ahash_alg ghash_async_alg = { - .init = ghash_async_init, - .update = ghash_async_update, - .final = ghash_async_final, - .setkey = ghash_async_setkey, - .digest = ghash_async_digest, - .import = ghash_async_import, - .export = ghash_async_export, - .halg.digestsize = GHASH_DIGEST_SIZE, - .halg.statesize = sizeof(struct ghash_desc_ctx), - .halg.base = { - .cra_name = "ghash", - .cra_driver_name = "ghash-ce", - .cra_priority = 300, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = GHASH_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct ghash_async_ctx), - .cra_module = THIS_MODULE, - .cra_init = ghash_async_init_tfm, - .cra_exit = ghash_async_exit_tfm, - }, -}; - - void pmull_gcm_encrypt(int blocks, u64 dg[], const char *src, struct gcm_key const *k, char *dst, const char *iv, int rounds, u32 counter); @@ -459,17 +284,11 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len) scatterwalk_start(&walk, req->src); do { - u32 n = scatterwalk_clamp(&walk, len); - u8 *p; - - if (!n) { - scatterwalk_start(&walk, sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, len); - } + unsigned int n; - p = scatterwalk_map(&walk); - gcm_update_mac(dg, p, n, buf, &buf_count, ctx); - scatterwalk_unmap(p); + n = scatterwalk_next(&walk, len); + gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx); + scatterwalk_done_src(&walk, n); if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) { kernel_neon_end(); @@ -477,8 +296,6 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len) } len -= n; - scatterwalk_advance(&walk, n); - scatterwalk_done(&walk, 0, len); } while (len); if (buf_count) { @@ -767,14 +584,9 @@ static int __init ghash_ce_mod_init(void) err = crypto_register_shash(&ghash_alg); if (err) goto err_aead; - err = crypto_register_ahash(&ghash_async_alg); - if (err) - goto err_shash; return 0; -err_shash: - crypto_unregister_shash(&ghash_alg); err_aead: if (elf_hwcap2 & HWCAP2_PMULL) crypto_unregister_aeads(gcm_aes_algs, @@ -784,7 +596,6 @@ err_aead: static void __exit ghash_ce_mod_exit(void) { - crypto_unregister_ahash(&ghash_async_alg); crypto_unregister_shash(&ghash_alg); if (elf_hwcap2 & HWCAP2_PMULL) crypto_unregister_aeads(gcm_aes_algs, diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig index 5636ab83f22a..3418c8d3c78d 100644 --- a/arch/arm64/crypto/Kconfig +++ b/arch/arm64/crypto/Kconfig @@ -26,10 +26,11 @@ config CRYPTO_NHPOLY1305_NEON - NEON (Advanced SIMD) extensions config CRYPTO_POLY1305_NEON - tristate "Hash functions: Poly1305 (NEON)" + tristate depends on KERNEL_MODE_NEON select CRYPTO_HASH select CRYPTO_ARCH_HAVE_LIB_POLY1305 + default CRYPTO_LIB_POLY1305_INTERNAL help Poly1305 authenticator algorithm (RFC7539) @@ -186,11 +187,12 @@ config CRYPTO_AES_ARM64_NEON_BLK - NEON (Advanced SIMD) extensions config CRYPTO_CHACHA20_NEON - tristate "Ciphers: ChaCha (NEON)" + tristate depends on KERNEL_MODE_NEON select CRYPTO_SKCIPHER select CRYPTO_LIB_CHACHA_GENERIC select CRYPTO_ARCH_HAVE_LIB_CHACHA + default CRYPTO_LIB_CHACHA_INTERNAL help Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c index a2b5d6f20f4d..2d791d51891b 100644 --- a/arch/arm64/crypto/aes-ce-ccm-glue.c +++ b/arch/arm64/crypto/aes-ce-ccm-glue.c @@ -156,23 +156,13 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) scatterwalk_start(&walk, req->src); do { - u32 n = 
scatterwalk_clamp(&walk, len); - u8 *p; - - if (!n) { - scatterwalk_start(&walk, sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, len); - } - p = scatterwalk_map(&walk); - - macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc, - num_rounds(ctx)); + unsigned int n; + n = scatterwalk_next(&walk, len); + macp = ce_aes_ccm_auth_data(mac, walk.addr, n, macp, + ctx->key_enc, num_rounds(ctx)); + scatterwalk_done_src(&walk, n); len -= n; - - scatterwalk_unmap(p); - scatterwalk_advance(&walk, n); - scatterwalk_done(&walk, 0, len); } while (len); } diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c index 46425e7b9755..c4a623e86593 100644 --- a/arch/arm64/crypto/aes-neonbs-glue.c +++ b/arch/arm64/crypto/aes-neonbs-glue.c @@ -287,7 +287,8 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, struct skcipher_walk walk; int nbytes, err; int first = 1; - u8 *out, *in; + const u8 *in; + u8 *out; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c index af2bbca38e70..229876acfc58 100644 --- a/arch/arm64/crypto/chacha-neon-glue.c +++ b/arch/arm64/crypto/chacha-neon-glue.c @@ -74,12 +74,6 @@ void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) } EXPORT_SYMBOL(hchacha_block_arch); -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -{ - chacha_init_generic(state, key, iv); -} -EXPORT_SYMBOL(chacha_init_arch); - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { @@ -110,7 +104,7 @@ static int chacha_neon_stream_xor(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); - chacha_init_generic(state, ctx->key, iv); + chacha_init(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -151,7 +145,7 @@ static int xchacha_neon(struct skcipher_request *req) u32 state[16]; u8 real_iv[16]; - chacha_init_generic(state, ctx->key, req->iv); + chacha_init(state, ctx->key, req->iv); hchacha_block_arch(state, subctx.key, ctx->nrounds); subctx.nrounds = ctx->nrounds; diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c index da7b7ec1a664..071e122f9c37 100644 --- a/arch/arm64/crypto/ghash-ce-glue.c +++ b/arch/arm64/crypto/ghash-ce-glue.c @@ -308,21 +308,12 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len) scatterwalk_start(&walk, req->src); do { - u32 n = scatterwalk_clamp(&walk, len); - u8 *p; + unsigned int n; - if (!n) { - scatterwalk_start(&walk, sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, len); - } - p = scatterwalk_map(&walk); - - gcm_update_mac(dg, p, n, buf, &buf_count, ctx); + n = scatterwalk_next(&walk, len); + gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx); + scatterwalk_done_src(&walk, n); len -= n; - - scatterwalk_unmap(p); - scatterwalk_advance(&walk, n); - scatterwalk_done(&walk, 0, len); } while (len); if (buf_count) { diff --git a/arch/arm64/crypto/sm4-ce-ccm-glue.c b/arch/arm64/crypto/sm4-ce-ccm-glue.c index 5e7e17bbec81..e9cc1c1364ec 100644 --- a/arch/arm64/crypto/sm4-ce-ccm-glue.c +++ b/arch/arm64/crypto/sm4-ce-ccm-glue.c @@ -112,17 +112,12 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) scatterwalk_start(&walk, req->src); do { - u32 n = scatterwalk_clamp(&walk, assoclen); - u8 *p, *ptr; + unsigned int n, orig_n; + const u8 *p; - if (!n) { - scatterwalk_start(&walk, sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, assoclen); - } - - p = ptr 
= scatterwalk_map(&walk); - assoclen -= n; - scatterwalk_advance(&walk, n); + orig_n = scatterwalk_next(&walk, assoclen); + p = walk.addr; + n = orig_n; while (n > 0) { unsigned int l, nblocks; @@ -136,9 +131,9 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) } else { nblocks = n / SM4_BLOCK_SIZE; sm4_ce_cbcmac_update(ctx->rkey_enc, - mac, ptr, nblocks); + mac, p, nblocks); - ptr += nblocks * SM4_BLOCK_SIZE; + p += nblocks * SM4_BLOCK_SIZE; n %= SM4_BLOCK_SIZE; continue; @@ -147,15 +142,15 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) l = min(n, SM4_BLOCK_SIZE - len); if (l) { - crypto_xor(mac + len, ptr, l); + crypto_xor(mac + len, p, l); len += l; - ptr += l; + p += l; n -= l; } } - scatterwalk_unmap(p); - scatterwalk_done(&walk, 0, assoclen); + scatterwalk_done_src(&walk, orig_n); + assoclen -= orig_n; } while (assoclen); } diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c index 73bfb6972d3a..c2ea3d5f690b 100644 --- a/arch/arm64/crypto/sm4-ce-gcm-glue.c +++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c @@ -82,20 +82,15 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[]) scatterwalk_start(&walk, req->src); do { - u32 n = scatterwalk_clamp(&walk, assoclen); - u8 *p, *ptr; + unsigned int n, orig_n; + const u8 *p; - if (!n) { - scatterwalk_start(&walk, sg_next(walk.sg)); - n = scatterwalk_clamp(&walk, assoclen); - } - - p = ptr = scatterwalk_map(&walk); - assoclen -= n; - scatterwalk_advance(&walk, n); + orig_n = scatterwalk_next(&walk, assoclen); + p = walk.addr; + n = orig_n; if (n + buflen < GHASH_BLOCK_SIZE) { - memcpy(&buffer[buflen], ptr, n); + memcpy(&buffer[buflen], p, n); buflen += n; } else { unsigned int nblocks; @@ -103,8 +98,8 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[]) if (buflen) { unsigned int l = GHASH_BLOCK_SIZE - buflen; - memcpy(&buffer[buflen], ptr, l); - ptr += l; + memcpy(&buffer[buflen], p, l); + p += l; n -= l; pmull_ghash_update(ctx->ghash_table, ghash, @@ -114,17 +109,17 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[]) nblocks = n / GHASH_BLOCK_SIZE; if (nblocks) { pmull_ghash_update(ctx->ghash_table, ghash, - ptr, nblocks); - ptr += nblocks * GHASH_BLOCK_SIZE; + p, nblocks); + p += nblocks * GHASH_BLOCK_SIZE; } buflen = n % GHASH_BLOCK_SIZE; if (buflen) - memcpy(&buffer[0], ptr, buflen); + memcpy(&buffer[0], p, buflen); } - scatterwalk_unmap(p); - scatterwalk_done(&walk, 0, assoclen); + scatterwalk_done_src(&walk, orig_n); + assoclen -= orig_n; } while (assoclen); /* padding with '0' */ diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig index 7decd40c4e20..545fc0e12422 100644 --- a/arch/mips/crypto/Kconfig +++ b/arch/mips/crypto/Kconfig @@ -3,9 +3,11 @@ menu "Accelerated Cryptographic Algorithms for CPU (mips)" config CRYPTO_POLY1305_MIPS - tristate "Hash functions: Poly1305" + tristate depends on MIPS + select CRYPTO_HASH select CRYPTO_ARCH_HAVE_LIB_POLY1305 + default CRYPTO_LIB_POLY1305_INTERNAL help Poly1305 authenticator algorithm (RFC7539) @@ -52,10 +54,11 @@ config CRYPTO_SHA512_OCTEON Architecture: mips OCTEON using crypto instructions, when available config CRYPTO_CHACHA_MIPS - tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (MIPS32r2)" + tristate depends on CPU_MIPS32_R2 select CRYPTO_SKCIPHER select CRYPTO_ARCH_HAVE_LIB_CHACHA + default CRYPTO_LIB_CHACHA_INTERNAL help Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms diff --git 
a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c index d1fd23e6ef84..f6fc2e1079a1 100644 --- a/arch/mips/crypto/chacha-glue.c +++ b/arch/mips/crypto/chacha-glue.c @@ -20,12 +20,6 @@ EXPORT_SYMBOL(chacha_crypt_arch); asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds); EXPORT_SYMBOL(hchacha_block_arch); -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -{ - chacha_init_generic(state, key, iv); -} -EXPORT_SYMBOL(chacha_init_arch); - static int chacha_mips_stream_xor(struct skcipher_request *req, const struct chacha_ctx *ctx, const u8 *iv) { @@ -35,7 +29,7 @@ static int chacha_mips_stream_xor(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); - chacha_init_generic(state, ctx->key, iv); + chacha_init(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -67,7 +61,7 @@ static int xchacha_mips(struct skcipher_request *req) u32 state[16]; u8 real_iv[16]; - chacha_init_generic(state, ctx->key, req->iv); + chacha_init(state, ctx->key, req->iv); hchacha_block(state, subctx.key, ctx->nrounds); subctx.nrounds = ctx->nrounds; diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig index 5b315e9756b3..370db8192ce6 100644 --- a/arch/powerpc/crypto/Kconfig +++ b/arch/powerpc/crypto/Kconfig @@ -3,10 +3,12 @@ menu "Accelerated Cryptographic Algorithms for CPU (powerpc)" config CRYPTO_CURVE25519_PPC64 - tristate "Public key crypto: Curve25519 (PowerPC64)" + tristate depends on PPC64 && CPU_LITTLE_ENDIAN + select CRYPTO_KPP select CRYPTO_LIB_CURVE25519_GENERIC select CRYPTO_ARCH_HAVE_LIB_CURVE25519 + default CRYPTO_LIB_CURVE25519_INTERNAL help Curve25519 algorithm @@ -91,11 +93,12 @@ config CRYPTO_AES_GCM_P10 later CPU. This module supports stitched acceleration for AES/GCM. 
config CRYPTO_CHACHA20_P10 - tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)" + tristate depends on PPC64 && CPU_LITTLE_ENDIAN && VSX select CRYPTO_SKCIPHER select CRYPTO_LIB_CHACHA_GENERIC select CRYPTO_ARCH_HAVE_LIB_CHACHA + default CRYPTO_LIB_CHACHA_INTERNAL help Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c index 679f52794baf..85f4fd4b1bdc 100644 --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c @@ -35,9 +35,9 @@ MODULE_ALIAS_CRYPTO("aes"); asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits, void *key); asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key); -asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len, +asmlinkage void aes_p10_gcm_encrypt(const u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); -asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len, +asmlinkage void aes_p10_gcm_decrypt(const u8 *in, u8 *out, size_t len, void *rkey, u8 *iv, void *Xi); asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]); asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable, @@ -261,7 +261,7 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv, return ret; while ((nbytes = walk.nbytes) > 0 && ret == 0) { - u8 *src = walk.src.virt.addr; + const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; u8 buf[AES_BLOCK_SIZE]; diff --git a/arch/powerpc/crypto/aes_ctr.c b/arch/powerpc/crypto/aes_ctr.c index 9a3da8cd62f3..3da75f42529a 100644 --- a/arch/powerpc/crypto/aes_ctr.c +++ b/arch/powerpc/crypto/aes_ctr.c @@ -69,9 +69,9 @@ static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx, struct skcipher_walk *walk) { + const u8 *src = walk->src.virt.addr; u8 *ctrblk = walk->iv; u8 keystream[AES_BLOCK_SIZE]; - u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c index 7c728755852e..d8796decc1fb 100644 --- a/arch/powerpc/crypto/chacha-p10-glue.c +++ b/arch/powerpc/crypto/chacha-p10-glue.c @@ -57,12 +57,6 @@ void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) } EXPORT_SYMBOL(hchacha_block_arch); -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -{ - chacha_init_generic(state, key, iv); -} -EXPORT_SYMBOL(chacha_init_arch); - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { @@ -95,7 +89,7 @@ static int chacha_p10_stream_xor(struct skcipher_request *req, if (err) return err; - chacha_init_generic(state, ctx->key, iv); + chacha_init(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -137,7 +131,7 @@ static int xchacha_p10(struct skcipher_request *req) u32 state[16]; u8 real_iv[16]; - chacha_init_generic(state, ctx->key, req->iv); + chacha_init(state, ctx->key, req->iv); hchacha_block_arch(state, subctx.key, ctx->nrounds); subctx.nrounds = ctx->nrounds; diff --git a/arch/riscv/crypto/Kconfig b/arch/riscv/crypto/Kconfig index ad58dad9a580..c67095a3d669 100644 --- a/arch/riscv/crypto/Kconfig +++ b/arch/riscv/crypto/Kconfig @@ -22,7 +22,6 @@ config CRYPTO_CHACHA_RISCV64 tristate "Ciphers: ChaCha" depends on 64BIT && RISCV_ISA_V && TOOLCHAIN_HAS_VECTOR_CRYPTO select CRYPTO_SKCIPHER - 
select CRYPTO_LIB_CHACHA_GENERIC help Length-preserving ciphers: ChaCha20 stream cipher algorithm diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig index b760232537f1..8c4db8b64fa2 100644 --- a/arch/s390/crypto/Kconfig +++ b/arch/s390/crypto/Kconfig @@ -108,11 +108,12 @@ config CRYPTO_DES_S390 As of z196 the CTR mode is hardware accelerated. config CRYPTO_CHACHA_S390 - tristate "Ciphers: ChaCha20" + tristate depends on S390 select CRYPTO_SKCIPHER select CRYPTO_LIB_CHACHA_GENERIC select CRYPTO_ARCH_HAVE_LIB_CHACHA + default CRYPTO_LIB_CHACHA_INTERNAL help Length-preserving cipher: ChaCha20 stream cipher (RFC 7539) diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c index 9c46b1b630b1..5d36f4020dfa 100644 --- a/arch/s390/crypto/aes_s390.c +++ b/arch/s390/crypto/aes_s390.c @@ -66,7 +66,6 @@ struct s390_xts_ctx { struct gcm_sg_walk { struct scatter_walk walk; unsigned int walk_bytes; - u8 *walk_ptr; unsigned int walk_bytes_remain; u8 buf[AES_BLOCK_SIZE]; unsigned int buf_bytes; @@ -787,29 +786,20 @@ static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg, static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw) { - struct scatterlist *nextsg; - - gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain); - while (!gw->walk_bytes) { - nextsg = sg_next(gw->walk.sg); - if (!nextsg) - return 0; - scatterwalk_start(&gw->walk, nextsg); - gw->walk_bytes = scatterwalk_clamp(&gw->walk, - gw->walk_bytes_remain); - } - gw->walk_ptr = scatterwalk_map(&gw->walk); + if (gw->walk_bytes_remain == 0) + return 0; + gw->walk_bytes = scatterwalk_next(&gw->walk, gw->walk_bytes_remain); return gw->walk_bytes; } static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw, - unsigned int nbytes) + unsigned int nbytes, bool out) { gw->walk_bytes_remain -= nbytes; - scatterwalk_unmap(gw->walk_ptr); - scatterwalk_advance(&gw->walk, nbytes); - scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain); - gw->walk_ptr = NULL; + if (out) + scatterwalk_done_dst(&gw->walk, nbytes); + else + scatterwalk_done_src(&gw->walk, nbytes); } static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) @@ -835,16 +825,16 @@ static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) } if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) { - gw->ptr = gw->walk_ptr; + gw->ptr = gw->walk.addr; gw->nbytes = gw->walk_bytes; goto out; } while (1) { n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes); - memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n); + memcpy(gw->buf + gw->buf_bytes, gw->walk.addr, n); gw->buf_bytes += n; - _gcm_sg_unmap_and_advance(gw, n); + _gcm_sg_unmap_and_advance(gw, n, false); if (gw->buf_bytes >= minbytesneeded) { gw->ptr = gw->buf; gw->nbytes = gw->buf_bytes; @@ -876,13 +866,12 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded) } if (gw->walk_bytes >= minbytesneeded) { - gw->ptr = gw->walk_ptr; + gw->ptr = gw->walk.addr; gw->nbytes = gw->walk_bytes; goto out; } - scatterwalk_unmap(gw->walk_ptr); - gw->walk_ptr = NULL; + scatterwalk_unmap(&gw->walk); gw->ptr = gw->buf; gw->nbytes = sizeof(gw->buf); @@ -904,7 +893,7 @@ static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) } else gw->buf_bytes = 0; } else - _gcm_sg_unmap_and_advance(gw, bytesdone); + _gcm_sg_unmap_and_advance(gw, bytesdone, false); return bytesdone; } @@ -921,11 +910,11 @@ static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone) if 
(!_gcm_sg_clamp_and_map(gw)) return i; n = min(gw->walk_bytes, bytesdone - i); - memcpy(gw->walk_ptr, gw->buf + i, n); - _gcm_sg_unmap_and_advance(gw, n); + memcpy(gw->walk.addr, gw->buf + i, n); + _gcm_sg_unmap_and_advance(gw, n, true); } } else - _gcm_sg_unmap_and_advance(gw, bytesdone); + _gcm_sg_unmap_and_advance(gw, bytesdone, true); return bytesdone; } diff --git a/arch/s390/crypto/chacha-glue.c b/arch/s390/crypto/chacha-glue.c index f8b0c52e77a4..920e9f0941e7 100644 --- a/arch/s390/crypto/chacha-glue.c +++ b/arch/s390/crypto/chacha-glue.c @@ -41,7 +41,7 @@ static int chacha20_s390(struct skcipher_request *req) int rc; rc = skcipher_walk_virt(&walk, req, false); - chacha_init_generic(state, ctx->key, req->iv); + chacha_init(state, ctx->key, req->iv); while (walk.nbytes > 0) { nbytes = walk.nbytes; @@ -69,12 +69,6 @@ void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) } EXPORT_SYMBOL(hchacha_block_arch); -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -{ - chacha_init_generic(state, key, iv); -} -EXPORT_SYMBOL(chacha_init_arch); - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c index e3d2138ff9e2..683150830356 100644 --- a/arch/sparc/crypto/aes_glue.c +++ b/arch/sparc/crypto/aes_glue.c @@ -321,7 +321,7 @@ static void ctr_crypt_final(const struct crypto_sparc64_aes_ctx *ctx, { u8 *ctrblk = walk->iv; u64 keystream[AES_BLOCK_SIZE / sizeof(u64)]; - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 4757bf922075..3d948f10c94c 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -3,10 +3,12 @@ menu "Accelerated Cryptographic Algorithms for CPU (x86)" config CRYPTO_CURVE25519_X86 - tristate "Public key crypto: Curve25519 (ADX)" + tristate depends on X86 && 64BIT + select CRYPTO_KPP select CRYPTO_LIB_CURVE25519_GENERIC select CRYPTO_ARCH_HAVE_LIB_CURVE25519 + default CRYPTO_LIB_CURVE25519_INTERNAL help Curve25519 algorithm @@ -348,11 +350,12 @@ config CRYPTO_ARIA_GFNI_AVX512_X86_64 Processes 64 blocks in parallel. 
config CRYPTO_CHACHA20_X86_64 - tristate "Ciphers: ChaCha20, XChaCha20, XChaCha12 (SSSE3/AVX2/AVX-512VL)" + tristate depends on X86 && 64BIT select CRYPTO_SKCIPHER select CRYPTO_LIB_CHACHA_GENERIC select CRYPTO_ARCH_HAVE_LIB_CHACHA + default CRYPTO_LIB_CHACHA_INTERNAL help Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms @@ -417,10 +420,12 @@ config CRYPTO_POLYVAL_CLMUL_NI - CLMUL-NI (carry-less multiplication new instructions) config CRYPTO_POLY1305_X86_64 - tristate "Hash functions: Poly1305 (SSE2/AVX2)" + tristate depends on X86 && 64BIT + select CRYPTO_HASH select CRYPTO_LIB_POLY1305_GENERIC select CRYPTO_ARCH_HAVE_LIB_POLY1305 + default CRYPTO_LIB_POLY1305_INTERNAL help Poly1305 authenticator algorithm (RFC7539) diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 07b00bfca64b..5d19f41bde58 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -48,7 +48,7 @@ chacha-x86_64-$(CONFIG_AS_AVX512) += chacha-avx512vl-x86_64.o obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o -aesni-intel-$(CONFIG_64BIT) += aes_ctrby8_avx-x86_64.o \ +aesni-intel-$(CONFIG_64BIT) += aes-ctr-avx-x86_64.o \ aes-gcm-aesni-x86_64.o \ aes-xts-avx-x86_64.o ifeq ($(CONFIG_AS_VAES)$(CONFIG_AS_VPCLMULQDQ),yy) diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c index 01fa568dc5fc..26786e15abac 100644 --- a/arch/x86/crypto/aegis128-aesni-glue.c +++ b/arch/x86/crypto/aegis128-aesni-glue.c @@ -71,10 +71,9 @@ static void crypto_aegis128_aesni_process_ad( scatterwalk_start(&walk, sg_src); while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int size = scatterwalk_next(&walk, assoclen); + const u8 *src = walk.addr; unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; if (pos + size >= AEGIS128_BLOCK_SIZE) { if (pos > 0) { @@ -97,9 +96,7 @@ static void crypto_aegis128_aesni_process_ad( pos += left; assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); + scatterwalk_done_src(&walk, size); } if (pos > 0) { diff --git a/arch/x86/crypto/aes-ctr-avx-x86_64.S b/arch/x86/crypto/aes-ctr-avx-x86_64.S new file mode 100644 index 000000000000..1685d8b24b2c --- /dev/null +++ b/arch/x86/crypto/aes-ctr-avx-x86_64.S @@ -0,0 +1,592 @@ +/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */ +// +// Copyright 2025 Google LLC +// +// Author: Eric Biggers <ebiggers@google.com> +// +// This file is dual-licensed, meaning that you can use it under your choice of +// either of the following two licenses: +// +// Licensed under the Apache License 2.0 (the "License"). You may obtain a copy +// of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// or +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +//------------------------------------------------------------------------------ +// +// This file contains x86_64 assembly implementations of AES-CTR and AES-XCTR +// using the following sets of CPU features: +// - AES-NI && AVX +// - VAES && AVX2 +// - VAES && (AVX10/256 || (AVX512BW && AVX512VL)) && BMI2 +// - VAES && (AVX10/512 || (AVX512BW && AVX512VL)) && BMI2 +// +// See the function definitions at the bottom of the file for more information. + +#include <linux/linkage.h> +#include <linux/cfi_types.h> + +.section .rodata +.p2align 4 + +.Lbswap_mask: + .octa 0x000102030405060708090a0b0c0d0e0f + +.Lctr_pattern: + .quad 0, 0 +.Lone: + .quad 1, 0 +.Ltwo: + .quad 2, 0 + .quad 3, 0 + +.Lfour: + .quad 4, 0 + +.text + +// Move a vector between memory and a register. +// The register operand must be in the first 16 vector registers. +.macro _vmovdqu src, dst +.if VL < 64 + vmovdqu \src, \dst +.else + vmovdqu8 \src, \dst +.endif +.endm + +// Move a vector between registers. +// The registers must be in the first 16 vector registers. +.macro _vmovdqa src, dst +.if VL < 64 + vmovdqa \src, \dst +.else + vmovdqa64 \src, \dst +.endif +.endm + +// Broadcast a 128-bit value from memory to all 128-bit lanes of a vector +// register. The register operand must be in the first 16 vector registers. +.macro _vbroadcast128 src, dst +.if VL == 16 + vmovdqu \src, \dst +.elseif VL == 32 + vbroadcasti128 \src, \dst +.else + vbroadcasti32x4 \src, \dst +.endif +.endm + +// XOR two vectors together. +// Any register operands must be in the first 16 vector registers. +.macro _vpxor src1, src2, dst +.if VL < 64 + vpxor \src1, \src2, \dst +.else + vpxord \src1, \src2, \dst +.endif +.endm + +// Load 1 <= %ecx <= 15 bytes from the pointer \src into the xmm register \dst +// and zeroize any remaining bytes. Clobbers %rax, %rcx, and \tmp{64,32}. +.macro _load_partial_block src, dst, tmp64, tmp32 + sub $8, %ecx // LEN - 8 + jle .Lle8\@ + + // Load 9 <= LEN <= 15 bytes. + vmovq (\src), \dst // Load first 8 bytes + mov (\src, %rcx), %rax // Load last 8 bytes + neg %ecx + shl $3, %ecx + shr %cl, %rax // Discard overlapping bytes + vpinsrq $1, %rax, \dst, \dst + jmp .Ldone\@ + +.Lle8\@: + add $4, %ecx // LEN - 4 + jl .Llt4\@ + + // Load 4 <= LEN <= 8 bytes. + mov (\src), %eax // Load first 4 bytes + mov (\src, %rcx), \tmp32 // Load last 4 bytes + jmp .Lcombine\@ + +.Llt4\@: + // Load 1 <= LEN <= 3 bytes. 
+ add $2, %ecx // LEN - 2 + movzbl (\src), %eax // Load first byte + jl .Lmovq\@ + movzwl (\src, %rcx), \tmp32 // Load last 2 bytes +.Lcombine\@: + shl $3, %ecx + shl %cl, \tmp64 + or \tmp64, %rax // Combine the two parts +.Lmovq\@: + vmovq %rax, \dst +.Ldone\@: +.endm + +// Store 1 <= %ecx <= 15 bytes from the xmm register \src to the pointer \dst. +// Clobbers %rax, %rcx, and \tmp{64,32}. +.macro _store_partial_block src, dst, tmp64, tmp32 + sub $8, %ecx // LEN - 8 + jl .Llt8\@ + + // Store 8 <= LEN <= 15 bytes. + vpextrq $1, \src, %rax + mov %ecx, \tmp32 + shl $3, %ecx + ror %cl, %rax + mov %rax, (\dst, \tmp64) // Store last LEN - 8 bytes + vmovq \src, (\dst) // Store first 8 bytes + jmp .Ldone\@ + +.Llt8\@: + add $4, %ecx // LEN - 4 + jl .Llt4\@ + + // Store 4 <= LEN <= 7 bytes. + vpextrd $1, \src, %eax + mov %ecx, \tmp32 + shl $3, %ecx + ror %cl, %eax + mov %eax, (\dst, \tmp64) // Store last LEN - 4 bytes + vmovd \src, (\dst) // Store first 4 bytes + jmp .Ldone\@ + +.Llt4\@: + // Store 1 <= LEN <= 3 bytes. + vpextrb $0, \src, 0(\dst) + cmp $-2, %ecx // LEN - 4 == -2, i.e. LEN == 2? + jl .Ldone\@ + vpextrb $1, \src, 1(\dst) + je .Ldone\@ + vpextrb $2, \src, 2(\dst) +.Ldone\@: +.endm + +// Prepare the next two vectors of AES inputs in AESDATA\i0 and AESDATA\i1, and +// XOR each with the zero-th round key. Also update LE_CTR if !\final. +.macro _prepare_2_ctr_vecs is_xctr, i0, i1, final=0 +.if \is_xctr + .if USE_AVX10 + _vmovdqa LE_CTR, AESDATA\i0 + vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i0 + .else + vpxor XCTR_IV, LE_CTR, AESDATA\i0 + vpxor RNDKEY0, AESDATA\i0, AESDATA\i0 + .endif + vpaddq LE_CTR_INC1, LE_CTR, AESDATA\i1 + + .if USE_AVX10 + vpternlogd $0x96, XCTR_IV, RNDKEY0, AESDATA\i1 + .else + vpxor XCTR_IV, AESDATA\i1, AESDATA\i1 + vpxor RNDKEY0, AESDATA\i1, AESDATA\i1 + .endif +.else + vpshufb BSWAP_MASK, LE_CTR, AESDATA\i0 + _vpxor RNDKEY0, AESDATA\i0, AESDATA\i0 + vpaddq LE_CTR_INC1, LE_CTR, AESDATA\i1 + vpshufb BSWAP_MASK, AESDATA\i1, AESDATA\i1 + _vpxor RNDKEY0, AESDATA\i1, AESDATA\i1 +.endif +.if !\final + vpaddq LE_CTR_INC2, LE_CTR, LE_CTR +.endif +.endm + +// Do all AES rounds on the data in the given AESDATA vectors, excluding the +// zero-th and last rounds. +.macro _aesenc_loop vecs:vararg + mov KEY, %rax +1: + _vbroadcast128 (%rax), RNDKEY +.irp i, \vecs + vaesenc RNDKEY, AESDATA\i, AESDATA\i +.endr + add $16, %rax + cmp %rax, RNDKEYLAST_PTR + jne 1b +.endm + +// Finalize the keystream blocks in the given AESDATA vectors by doing the last +// AES round, then XOR those keystream blocks with the corresponding data. +// Reduce latency by doing the XOR before the vaesenclast, utilizing the +// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a). +.macro _aesenclast_and_xor vecs:vararg +.irp i, \vecs + _vpxor \i*VL(SRC), RNDKEYLAST, RNDKEY + vaesenclast RNDKEY, AESDATA\i, AESDATA\i +.endr +.irp i, \vecs + _vmovdqu AESDATA\i, \i*VL(DST) +.endr +.endm + +// XOR the keystream blocks in the specified AESDATA vectors with the +// corresponding data. +.macro _xor_data vecs:vararg +.irp i, \vecs + _vpxor \i*VL(SRC), AESDATA\i, AESDATA\i +.endr +.irp i, \vecs + _vmovdqu AESDATA\i, \i*VL(DST) +.endr +.endm + +.macro _aes_ctr_crypt is_xctr + + // Define register aliases V0-V15 that map to the xmm, ymm, or zmm + // registers according to the selected Vector Length (VL). 
+.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 + .if VL == 16 + .set V\i, %xmm\i + .elseif VL == 32 + .set V\i, %ymm\i + .elseif VL == 64 + .set V\i, %zmm\i + .else + .error "Unsupported Vector Length (VL)" + .endif +.endr + + // Function arguments + .set KEY, %rdi // Initially points to the start of the + // crypto_aes_ctx, then is advanced to + // point to the index 1 round key + .set KEY32, %edi // Available as temp register after all + // keystream blocks have been generated + .set SRC, %rsi // Pointer to next source data + .set DST, %rdx // Pointer to next destination data + .set LEN, %ecx // Remaining length in bytes. + // Note: _load_partial_block relies on + // this being in %ecx. + .set LEN64, %rcx // Zero-extend LEN before using! + .set LEN8, %cl +.if \is_xctr + .set XCTR_IV_PTR, %r8 // const u8 iv[AES_BLOCK_SIZE]; + .set XCTR_CTR, %r9 // u64 ctr; +.else + .set LE_CTR_PTR, %r8 // const u64 le_ctr[2]; +.endif + + // Additional local variables + .set RNDKEYLAST_PTR, %r10 + .set AESDATA0, V0 + .set AESDATA0_XMM, %xmm0 + .set AESDATA1, V1 + .set AESDATA1_XMM, %xmm1 + .set AESDATA2, V2 + .set AESDATA3, V3 + .set AESDATA4, V4 + .set AESDATA5, V5 + .set AESDATA6, V6 + .set AESDATA7, V7 +.if \is_xctr + .set XCTR_IV, V8 +.else + .set BSWAP_MASK, V8 +.endif + .set LE_CTR, V9 + .set LE_CTR_XMM, %xmm9 + .set LE_CTR_INC1, V10 + .set LE_CTR_INC2, V11 + .set RNDKEY0, V12 + .set RNDKEYLAST, V13 + .set RNDKEY, V14 + + // Create the first vector of counters. +.if \is_xctr + .if VL == 16 + vmovq XCTR_CTR, LE_CTR + .elseif VL == 32 + vmovq XCTR_CTR, LE_CTR_XMM + inc XCTR_CTR + vmovq XCTR_CTR, AESDATA0_XMM + vinserti128 $1, AESDATA0_XMM, LE_CTR, LE_CTR + .else + vpbroadcastq XCTR_CTR, LE_CTR + vpsrldq $8, LE_CTR, LE_CTR + vpaddq .Lctr_pattern(%rip), LE_CTR, LE_CTR + .endif + _vbroadcast128 (XCTR_IV_PTR), XCTR_IV +.else + _vbroadcast128 (LE_CTR_PTR), LE_CTR + .if VL > 16 + vpaddq .Lctr_pattern(%rip), LE_CTR, LE_CTR + .endif + _vbroadcast128 .Lbswap_mask(%rip), BSWAP_MASK +.endif + +.if VL == 16 + _vbroadcast128 .Lone(%rip), LE_CTR_INC1 +.elseif VL == 32 + _vbroadcast128 .Ltwo(%rip), LE_CTR_INC1 +.else + _vbroadcast128 .Lfour(%rip), LE_CTR_INC1 +.endif + vpsllq $1, LE_CTR_INC1, LE_CTR_INC2 + + // Load the AES key length: 16 (AES-128), 24 (AES-192), or 32 (AES-256). + movl 480(KEY), %eax + + // Compute the pointer to the last round key. + lea 6*16(KEY, %rax, 4), RNDKEYLAST_PTR + + // Load the zero-th and last round keys. + _vbroadcast128 (KEY), RNDKEY0 + _vbroadcast128 (RNDKEYLAST_PTR), RNDKEYLAST + + // Make KEY point to the first round key. + add $16, KEY + + // This is the main loop, which encrypts 8 vectors of data at a time. + add $-8*VL, LEN + jl .Lloop_8x_done\@ +.Lloop_8x\@: + _prepare_2_ctr_vecs \is_xctr, 0, 1 + _prepare_2_ctr_vecs \is_xctr, 2, 3 + _prepare_2_ctr_vecs \is_xctr, 4, 5 + _prepare_2_ctr_vecs \is_xctr, 6, 7 + _aesenc_loop 0,1,2,3,4,5,6,7 + _aesenclast_and_xor 0,1,2,3,4,5,6,7 + sub $-8*VL, SRC + sub $-8*VL, DST + add $-8*VL, LEN + jge .Lloop_8x\@ +.Lloop_8x_done\@: + sub $-8*VL, LEN + jz .Ldone\@ + + // 1 <= LEN < 8*VL. Generate 2, 4, or 8 more vectors of keystream + // blocks, depending on the remaining LEN. + + _prepare_2_ctr_vecs \is_xctr, 0, 1 + _prepare_2_ctr_vecs \is_xctr, 2, 3 + cmp $4*VL, LEN + jle .Lenc_tail_atmost4vecs\@ + + // 4*VL < LEN < 8*VL. Generate 8 vectors of keystream blocks. Use the + // first 4 to XOR 4 full vectors of data. Then XOR the remaining data. 
+ _prepare_2_ctr_vecs \is_xctr, 4, 5 + _prepare_2_ctr_vecs \is_xctr, 6, 7, final=1 + _aesenc_loop 0,1,2,3,4,5,6,7 + _aesenclast_and_xor 0,1,2,3 + vaesenclast RNDKEYLAST, AESDATA4, AESDATA0 + vaesenclast RNDKEYLAST, AESDATA5, AESDATA1 + vaesenclast RNDKEYLAST, AESDATA6, AESDATA2 + vaesenclast RNDKEYLAST, AESDATA7, AESDATA3 + sub $-4*VL, SRC + sub $-4*VL, DST + add $-4*VL, LEN + cmp $1*VL-1, LEN + jle .Lxor_tail_partial_vec_0\@ + _xor_data 0 + cmp $2*VL-1, LEN + jle .Lxor_tail_partial_vec_1\@ + _xor_data 1 + cmp $3*VL-1, LEN + jle .Lxor_tail_partial_vec_2\@ + _xor_data 2 + cmp $4*VL-1, LEN + jle .Lxor_tail_partial_vec_3\@ + _xor_data 3 + jmp .Ldone\@ + +.Lenc_tail_atmost4vecs\@: + cmp $2*VL, LEN + jle .Lenc_tail_atmost2vecs\@ + + // 2*VL < LEN <= 4*VL. Generate 4 vectors of keystream blocks. Use the + // first 2 to XOR 2 full vectors of data. Then XOR the remaining data. + _aesenc_loop 0,1,2,3 + _aesenclast_and_xor 0,1 + vaesenclast RNDKEYLAST, AESDATA2, AESDATA0 + vaesenclast RNDKEYLAST, AESDATA3, AESDATA1 + sub $-2*VL, SRC + sub $-2*VL, DST + add $-2*VL, LEN + jmp .Lxor_tail_upto2vecs\@ + +.Lenc_tail_atmost2vecs\@: + // 1 <= LEN <= 2*VL. Generate 2 vectors of keystream blocks. Then XOR + // the remaining data. + _aesenc_loop 0,1 + vaesenclast RNDKEYLAST, AESDATA0, AESDATA0 + vaesenclast RNDKEYLAST, AESDATA1, AESDATA1 + +.Lxor_tail_upto2vecs\@: + cmp $1*VL-1, LEN + jle .Lxor_tail_partial_vec_0\@ + _xor_data 0 + cmp $2*VL-1, LEN + jle .Lxor_tail_partial_vec_1\@ + _xor_data 1 + jmp .Ldone\@ + +.Lxor_tail_partial_vec_1\@: + add $-1*VL, LEN + jz .Ldone\@ + sub $-1*VL, SRC + sub $-1*VL, DST + _vmovdqa AESDATA1, AESDATA0 + jmp .Lxor_tail_partial_vec_0\@ + +.Lxor_tail_partial_vec_2\@: + add $-2*VL, LEN + jz .Ldone\@ + sub $-2*VL, SRC + sub $-2*VL, DST + _vmovdqa AESDATA2, AESDATA0 + jmp .Lxor_tail_partial_vec_0\@ + +.Lxor_tail_partial_vec_3\@: + add $-3*VL, LEN + jz .Ldone\@ + sub $-3*VL, SRC + sub $-3*VL, DST + _vmovdqa AESDATA3, AESDATA0 + +.Lxor_tail_partial_vec_0\@: + // XOR the remaining 1 <= LEN < VL bytes. It's easy if masked + // loads/stores are available; otherwise it's a bit harder... +.if USE_AVX10 + .if VL <= 32 + mov $-1, %eax + bzhi LEN, %eax, %eax + kmovd %eax, %k1 + .else + mov $-1, %rax + bzhi LEN64, %rax, %rax + kmovq %rax, %k1 + .endif + vmovdqu8 (SRC), AESDATA1{%k1}{z} + _vpxor AESDATA1, AESDATA0, AESDATA0 + vmovdqu8 AESDATA0, (DST){%k1} +.else + .if VL == 32 + cmp $16, LEN + jl 1f + vpxor (SRC), AESDATA0_XMM, AESDATA1_XMM + vmovdqu AESDATA1_XMM, (DST) + add $16, SRC + add $16, DST + sub $16, LEN + jz .Ldone\@ + vextracti128 $1, AESDATA0, AESDATA0_XMM +1: + .endif + mov LEN, %r10d + _load_partial_block SRC, AESDATA1_XMM, KEY, KEY32 + vpxor AESDATA1_XMM, AESDATA0_XMM, AESDATA0_XMM + mov %r10d, %ecx + _store_partial_block AESDATA0_XMM, DST, KEY, KEY32 +.endif + +.Ldone\@: +.if VL > 16 + vzeroupper +.endif + RET +.endm + +// Below are the definitions of the functions generated by the above macro. +// They have the following prototypes: +// +// +// void aes_ctr64_crypt_##suffix(const struct crypto_aes_ctx *key, +// const u8 *src, u8 *dst, int len, +// const u64 le_ctr[2]); +// +// void aes_xctr_crypt_##suffix(const struct crypto_aes_ctx *key, +// const u8 *src, u8 *dst, int len, +// const u8 iv[AES_BLOCK_SIZE], u64 ctr); +// +// Both functions generate |len| bytes of keystream, XOR it with the data from +// |src|, and write the result to |dst|. On non-final calls, |len| must be a +// multiple of 16. On the final call, |len| can be any value. 
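[Before the per-mode details that follow, a simplified C sketch of a single-part caller honoring this contract; it mirrors the carry-splitting logic from the aesni-intel_glue.c changes later in this patch, with aes_ctr64_crypt standing in for any of the generated variants::

    #include <crypto/aes.h>
    #include <linux/math.h>
    #include <linux/minmax.h>
    #include <linux/unaligned.h>

    static void ctr64_crypt_once(const struct crypto_aes_ctx *key,
                                 const u8 *src, u8 *dst, int len,
                                 const u8 iv[AES_BLOCK_SIZE])
    {
            u64 nblocks = DIV_ROUND_UP(len, AES_BLOCK_SIZE);
            u64 le_ctr[2];
            u64 end;

            /* Convert the big endian IV into the little endian le_ctr form. */
            le_ctr[0] = get_unaligned_be64(iv + 8);
            le_ctr[1] = get_unaligned_be64(iv);

            end = le_ctr[0] + nblocks;
            if (likely(end >= nblocks)) {
                    /* The low 64 bits of the counter won't wrap around. */
                    aes_ctr64_crypt(key, src, dst, len, le_ctr);
            } else {
                    /* Split where the wrap occurs; carry into the high half. */
                    int p1 = min_t(u64, len, (nblocks - end) * AES_BLOCK_SIZE);

                    aes_ctr64_crypt(key, src, dst, p1, le_ctr);
                    le_ctr[0] = 0;
                    le_ctr[1]++;
                    aes_ctr64_crypt(key, src + p1, dst + p1, len - p1, le_ctr);
            }
    }
]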
+// +// aes_ctr64_crypt_* implement "regular" CTR, where the keystream is generated +// from a 128-bit big endian counter that increments by 1 for each AES block. +// HOWEVER, to keep the assembly code simple, some of the counter management is +// left to the caller. aes_ctr64_crypt_* take the counter in little endian +// form, only increment the low 64 bits internally, do the conversion to big +// endian internally, and don't write the updated counter back to memory. The +// caller is responsible for converting the starting IV to the little endian +// le_ctr, detecting the (very rare) case of a carry out of the low 64 bits +// being needed and splitting at that point with a carry done in between, and +// updating le_ctr after each part if the message is multi-part. +// +// aes_xctr_crypt_* implement XCTR as specified in "Length-preserving encryption +// with HCTR2" (https://eprint.iacr.org/2021/1441.pdf). XCTR is an +// easier-to-implement variant of CTR that uses little endian byte order and +// eliminates carries. |ctr| is the per-message block counter starting at 1. + +.set VL, 16 +.set USE_AVX10, 0 +SYM_TYPED_FUNC_START(aes_ctr64_crypt_aesni_avx) + _aes_ctr_crypt 0 +SYM_FUNC_END(aes_ctr64_crypt_aesni_avx) +SYM_TYPED_FUNC_START(aes_xctr_crypt_aesni_avx) + _aes_ctr_crypt 1 +SYM_FUNC_END(aes_xctr_crypt_aesni_avx) + +#if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) +.set VL, 32 +.set USE_AVX10, 0 +SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx2) + _aes_ctr_crypt 0 +SYM_FUNC_END(aes_ctr64_crypt_vaes_avx2) +SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx2) + _aes_ctr_crypt 1 +SYM_FUNC_END(aes_xctr_crypt_vaes_avx2) + +.set VL, 32 +.set USE_AVX10, 1 +SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_256) + _aes_ctr_crypt 0 +SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_256) +SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_256) + _aes_ctr_crypt 1 +SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_256) + +.set VL, 64 +.set USE_AVX10, 1 +SYM_TYPED_FUNC_START(aes_ctr64_crypt_vaes_avx10_512) + _aes_ctr_crypt 0 +SYM_FUNC_END(aes_ctr64_crypt_vaes_avx10_512) +SYM_TYPED_FUNC_START(aes_xctr_crypt_vaes_avx10_512) + _aes_ctr_crypt 1 +SYM_FUNC_END(aes_xctr_crypt_vaes_avx10_512) +#endif // CONFIG_AS_VAES && CONFIG_AS_VPCLMULQDQ diff --git a/arch/x86/crypto/aes-xts-avx-x86_64.S b/arch/x86/crypto/aes-xts-avx-x86_64.S index 8a3e23fbcf85..93ba0ddbe009 100644 --- a/arch/x86/crypto/aes-xts-avx-x86_64.S +++ b/arch/x86/crypto/aes-xts-avx-x86_64.S @@ -1,11 +1,50 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * AES-XTS for modern x86_64 CPUs - * - * Copyright 2024 Google LLC - * - * Author: Eric Biggers <ebiggers@google.com> - */ +/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */ +// +// AES-XTS for modern x86_64 CPUs +// +// Copyright 2024 Google LLC +// +// Author: Eric Biggers <ebiggers@google.com> +// +//------------------------------------------------------------------------------ +// +// This file is dual-licensed, meaning that you can use it under your choice of +// either of the following two licenses: +// +// Licensed under the Apache License 2.0 (the "License"). You may obtain a copy +// of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// or +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. /* * This file implements AES-XTS for modern x86_64 CPUs. To handle the diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S deleted file mode 100644 index 2402b9418cd7..000000000000 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ /dev/null @@ -1,597 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */ -/* - * AES CTR mode by8 optimization with AVX instructions. (x86_64) - * - * Copyright(c) 2014 Intel Corporation. - * - * Contact Information: - * James Guilford <james.guilford@intel.com> - * Sean Gulley <sean.m.gulley@intel.com> - * Chandramouli Narayanan <mouli@linux.intel.com> - */ -/* - * This is AES128/192/256 CTR mode optimization implementation. It requires - * the support of Intel(R) AESNI and AVX instructions. - * - * This work was inspired by the AES CTR mode optimization published - * in Intel Optimized IPSEC Cryptographic library. - * Additional information on it can be found at: - * https://github.com/intel/intel-ipsec-mb - */ - -#include <linux/linkage.h> - -#define VMOVDQ vmovdqu - -/* - * Note: the "x" prefix in these aliases means "this is an xmm register". The - * alias prefixes have no relation to XCTR where the "X" prefix means "XOR - * counter". 
- */ -#define xdata0 %xmm0 -#define xdata1 %xmm1 -#define xdata2 %xmm2 -#define xdata3 %xmm3 -#define xdata4 %xmm4 -#define xdata5 %xmm5 -#define xdata6 %xmm6 -#define xdata7 %xmm7 -#define xcounter %xmm8 // CTR mode only -#define xiv %xmm8 // XCTR mode only -#define xbyteswap %xmm9 // CTR mode only -#define xtmp %xmm9 // XCTR mode only -#define xkey0 %xmm10 -#define xkey4 %xmm11 -#define xkey8 %xmm12 -#define xkey12 %xmm13 -#define xkeyA %xmm14 -#define xkeyB %xmm15 - -#define p_in %rdi -#define p_iv %rsi -#define p_keys %rdx -#define p_out %rcx -#define num_bytes %r8 -#define counter %r9 // XCTR mode only -#define tmp %r10 -#define DDQ_DATA 0 -#define XDATA 1 -#define KEY_128 1 -#define KEY_192 2 -#define KEY_256 3 - -.section .rodata -.align 16 - -byteswap_const: - .octa 0x000102030405060708090A0B0C0D0E0F -ddq_low_msk: - .octa 0x0000000000000000FFFFFFFFFFFFFFFF -ddq_high_add_1: - .octa 0x00000000000000010000000000000000 -ddq_add_1: - .octa 0x00000000000000000000000000000001 -ddq_add_2: - .octa 0x00000000000000000000000000000002 -ddq_add_3: - .octa 0x00000000000000000000000000000003 -ddq_add_4: - .octa 0x00000000000000000000000000000004 -ddq_add_5: - .octa 0x00000000000000000000000000000005 -ddq_add_6: - .octa 0x00000000000000000000000000000006 -ddq_add_7: - .octa 0x00000000000000000000000000000007 -ddq_add_8: - .octa 0x00000000000000000000000000000008 - -.text - -/* generate a unique variable for ddq_add_x */ - -/* generate a unique variable for xmm register */ -.macro setxdata n - var_xdata = %xmm\n -.endm - -/* club the numeric 'id' to the symbol 'name' */ - -.macro club name, id -.altmacro - .if \name == XDATA - setxdata %\id - .endif -.noaltmacro -.endm - -/* - * do_aes num_in_par load_keys key_len - * This increments p_in, but not p_out - */ -.macro do_aes b, k, key_len, xctr - .set by, \b - .set load_keys, \k - .set klen, \key_len - - .if (load_keys) - vmovdqa 0*16(p_keys), xkey0 - .endif - - .if \xctr - movq counter, xtmp - .set i, 0 - .rept (by) - club XDATA, i - vpaddq (ddq_add_1 + 16 * i)(%rip), xtmp, var_xdata - .set i, (i +1) - .endr - .set i, 0 - .rept (by) - club XDATA, i - vpxor xiv, var_xdata, var_xdata - .set i, (i +1) - .endr - .else - vpshufb xbyteswap, xcounter, xdata0 - .set i, 1 - .rept (by - 1) - club XDATA, i - vpaddq (ddq_add_1 + 16 * (i - 1))(%rip), xcounter, var_xdata - vptest ddq_low_msk(%rip), var_xdata - jnz 1f - vpaddq ddq_high_add_1(%rip), var_xdata, var_xdata - vpaddq ddq_high_add_1(%rip), xcounter, xcounter - 1: - vpshufb xbyteswap, var_xdata, var_xdata - .set i, (i +1) - .endr - .endif - - vmovdqa 1*16(p_keys), xkeyA - - vpxor xkey0, xdata0, xdata0 - .if \xctr - add $by, counter - .else - vpaddq (ddq_add_1 + 16 * (by - 1))(%rip), xcounter, xcounter - vptest ddq_low_msk(%rip), xcounter - jnz 1f - vpaddq ddq_high_add_1(%rip), xcounter, xcounter - 1: - .endif - - .set i, 1 - .rept (by - 1) - club XDATA, i - vpxor xkey0, var_xdata, var_xdata - .set i, (i +1) - .endr - - vmovdqa 2*16(p_keys), xkeyB - - .set i, 0 - .rept by - club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 1 */ - .set i, (i +1) - .endr - - .if (klen == KEY_128) - .if (load_keys) - vmovdqa 3*16(p_keys), xkey4 - .endif - .else - vmovdqa 3*16(p_keys), xkeyA - .endif - - .set i, 0 - .rept by - club XDATA, i - vaesenc xkeyB, var_xdata, var_xdata /* key 2 */ - .set i, (i +1) - .endr - - add $(16*by), p_in - - .if (klen == KEY_128) - vmovdqa 4*16(p_keys), xkeyB - .else - .if (load_keys) - vmovdqa 4*16(p_keys), xkey4 - .endif - .endif - - .set i, 0 - .rept by - club XDATA, i - /* key 3 */ 
- .if (klen == KEY_128) - vaesenc xkey4, var_xdata, var_xdata - .else - vaesenc xkeyA, var_xdata, var_xdata - .endif - .set i, (i +1) - .endr - - vmovdqa 5*16(p_keys), xkeyA - - .set i, 0 - .rept by - club XDATA, i - /* key 4 */ - .if (klen == KEY_128) - vaesenc xkeyB, var_xdata, var_xdata - .else - vaesenc xkey4, var_xdata, var_xdata - .endif - .set i, (i +1) - .endr - - .if (klen == KEY_128) - .if (load_keys) - vmovdqa 6*16(p_keys), xkey8 - .endif - .else - vmovdqa 6*16(p_keys), xkeyB - .endif - - .set i, 0 - .rept by - club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 5 */ - .set i, (i +1) - .endr - - vmovdqa 7*16(p_keys), xkeyA - - .set i, 0 - .rept by - club XDATA, i - /* key 6 */ - .if (klen == KEY_128) - vaesenc xkey8, var_xdata, var_xdata - .else - vaesenc xkeyB, var_xdata, var_xdata - .endif - .set i, (i +1) - .endr - - .if (klen == KEY_128) - vmovdqa 8*16(p_keys), xkeyB - .else - .if (load_keys) - vmovdqa 8*16(p_keys), xkey8 - .endif - .endif - - .set i, 0 - .rept by - club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 7 */ - .set i, (i +1) - .endr - - .if (klen == KEY_128) - .if (load_keys) - vmovdqa 9*16(p_keys), xkey12 - .endif - .else - vmovdqa 9*16(p_keys), xkeyA - .endif - - .set i, 0 - .rept by - club XDATA, i - /* key 8 */ - .if (klen == KEY_128) - vaesenc xkeyB, var_xdata, var_xdata - .else - vaesenc xkey8, var_xdata, var_xdata - .endif - .set i, (i +1) - .endr - - vmovdqa 10*16(p_keys), xkeyB - - .set i, 0 - .rept by - club XDATA, i - /* key 9 */ - .if (klen == KEY_128) - vaesenc xkey12, var_xdata, var_xdata - .else - vaesenc xkeyA, var_xdata, var_xdata - .endif - .set i, (i +1) - .endr - - .if (klen != KEY_128) - vmovdqa 11*16(p_keys), xkeyA - .endif - - .set i, 0 - .rept by - club XDATA, i - /* key 10 */ - .if (klen == KEY_128) - vaesenclast xkeyB, var_xdata, var_xdata - .else - vaesenc xkeyB, var_xdata, var_xdata - .endif - .set i, (i +1) - .endr - - .if (klen != KEY_128) - .if (load_keys) - vmovdqa 12*16(p_keys), xkey12 - .endif - - .set i, 0 - .rept by - club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 11 */ - .set i, (i +1) - .endr - - .if (klen == KEY_256) - vmovdqa 13*16(p_keys), xkeyA - .endif - - .set i, 0 - .rept by - club XDATA, i - .if (klen == KEY_256) - /* key 12 */ - vaesenc xkey12, var_xdata, var_xdata - .else - vaesenclast xkey12, var_xdata, var_xdata - .endif - .set i, (i +1) - .endr - - .if (klen == KEY_256) - vmovdqa 14*16(p_keys), xkeyB - - .set i, 0 - .rept by - club XDATA, i - /* key 13 */ - vaesenc xkeyA, var_xdata, var_xdata - .set i, (i +1) - .endr - - .set i, 0 - .rept by - club XDATA, i - /* key 14 */ - vaesenclast xkeyB, var_xdata, var_xdata - .set i, (i +1) - .endr - .endif - .endif - - .set i, 0 - .rept (by / 2) - .set j, (i+1) - VMOVDQ (i*16 - 16*by)(p_in), xkeyA - VMOVDQ (j*16 - 16*by)(p_in), xkeyB - club XDATA, i - vpxor xkeyA, var_xdata, var_xdata - club XDATA, j - vpxor xkeyB, var_xdata, var_xdata - .set i, (i+2) - .endr - - .if (i < by) - VMOVDQ (i*16 - 16*by)(p_in), xkeyA - club XDATA, i - vpxor xkeyA, var_xdata, var_xdata - .endif - - .set i, 0 - .rept by - club XDATA, i - VMOVDQ var_xdata, i*16(p_out) - .set i, (i+1) - .endr -.endm - -.macro do_aes_load val, key_len, xctr - do_aes \val, 1, \key_len, \xctr -.endm - -.macro do_aes_noload val, key_len, xctr - do_aes \val, 0, \key_len, \xctr -.endm - -/* main body of aes ctr load */ - -.macro do_aes_ctrmain key_len, xctr - cmp $16, num_bytes - jb .Ldo_return2\xctr\key_len - - .if \xctr - shr $4, counter - vmovdqu (p_iv), xiv - .else - vmovdqa 
byteswap_const(%rip), xbyteswap - vmovdqu (p_iv), xcounter - vpshufb xbyteswap, xcounter, xcounter - .endif - - mov num_bytes, tmp - and $(7*16), tmp - jz .Lmult_of_8_blks\xctr\key_len - - /* 1 <= tmp <= 7 */ - cmp $(4*16), tmp - jg .Lgt4\xctr\key_len - je .Leq4\xctr\key_len - -.Llt4\xctr\key_len: - cmp $(2*16), tmp - jg .Leq3\xctr\key_len - je .Leq2\xctr\key_len - -.Leq1\xctr\key_len: - do_aes_load 1, \key_len, \xctr - add $(1*16), p_out - and $(~7*16), num_bytes - jz .Ldo_return2\xctr\key_len - jmp .Lmain_loop2\xctr\key_len - -.Leq2\xctr\key_len: - do_aes_load 2, \key_len, \xctr - add $(2*16), p_out - and $(~7*16), num_bytes - jz .Ldo_return2\xctr\key_len - jmp .Lmain_loop2\xctr\key_len - - -.Leq3\xctr\key_len: - do_aes_load 3, \key_len, \xctr - add $(3*16), p_out - and $(~7*16), num_bytes - jz .Ldo_return2\xctr\key_len - jmp .Lmain_loop2\xctr\key_len - -.Leq4\xctr\key_len: - do_aes_load 4, \key_len, \xctr - add $(4*16), p_out - and $(~7*16), num_bytes - jz .Ldo_return2\xctr\key_len - jmp .Lmain_loop2\xctr\key_len - -.Lgt4\xctr\key_len: - cmp $(6*16), tmp - jg .Leq7\xctr\key_len - je .Leq6\xctr\key_len - -.Leq5\xctr\key_len: - do_aes_load 5, \key_len, \xctr - add $(5*16), p_out - and $(~7*16), num_bytes - jz .Ldo_return2\xctr\key_len - jmp .Lmain_loop2\xctr\key_len - -.Leq6\xctr\key_len: - do_aes_load 6, \key_len, \xctr - add $(6*16), p_out - and $(~7*16), num_bytes - jz .Ldo_return2\xctr\key_len - jmp .Lmain_loop2\xctr\key_len - -.Leq7\xctr\key_len: - do_aes_load 7, \key_len, \xctr - add $(7*16), p_out - and $(~7*16), num_bytes - jz .Ldo_return2\xctr\key_len - jmp .Lmain_loop2\xctr\key_len - -.Lmult_of_8_blks\xctr\key_len: - .if (\key_len != KEY_128) - vmovdqa 0*16(p_keys), xkey0 - vmovdqa 4*16(p_keys), xkey4 - vmovdqa 8*16(p_keys), xkey8 - vmovdqa 12*16(p_keys), xkey12 - .else - vmovdqa 0*16(p_keys), xkey0 - vmovdqa 3*16(p_keys), xkey4 - vmovdqa 6*16(p_keys), xkey8 - vmovdqa 9*16(p_keys), xkey12 - .endif -.align 16 -.Lmain_loop2\xctr\key_len: - /* num_bytes is a multiple of 8 and >0 */ - do_aes_noload 8, \key_len, \xctr - add $(8*16), p_out - sub $(8*16), num_bytes - jne .Lmain_loop2\xctr\key_len - -.Ldo_return2\xctr\key_len: - .if !\xctr - /* return updated IV */ - vpshufb xbyteswap, xcounter, xcounter - vmovdqu xcounter, (p_iv) - .endif - RET -.endm - -/* - * routine to do AES128 CTR enc/decrypt "by8" - * XMM registers are clobbered. - * Saving/restoring must be done at a higher level - * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out, - * unsigned int num_bytes) - */ -SYM_FUNC_START(aes_ctr_enc_128_avx_by8) - /* call the aes main loop */ - do_aes_ctrmain KEY_128 0 - -SYM_FUNC_END(aes_ctr_enc_128_avx_by8) - -/* - * routine to do AES192 CTR enc/decrypt "by8" - * XMM registers are clobbered. - * Saving/restoring must be done at a higher level - * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out, - * unsigned int num_bytes) - */ -SYM_FUNC_START(aes_ctr_enc_192_avx_by8) - /* call the aes main loop */ - do_aes_ctrmain KEY_192 0 - -SYM_FUNC_END(aes_ctr_enc_192_avx_by8) - -/* - * routine to do AES256 CTR enc/decrypt "by8" - * XMM registers are clobbered. - * Saving/restoring must be done at a higher level - * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out, - * unsigned int num_bytes) - */ -SYM_FUNC_START(aes_ctr_enc_256_avx_by8) - /* call the aes main loop */ - do_aes_ctrmain KEY_256 0 - -SYM_FUNC_END(aes_ctr_enc_256_avx_by8) - -/* - * routine to do AES128 XCTR enc/decrypt "by8" - * XMM registers are clobbered. 
- * Saving/restoring must be done at a higher level - * aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv, const void *keys, - * u8* out, unsigned int num_bytes, unsigned int byte_ctr) - */ -SYM_FUNC_START(aes_xctr_enc_128_avx_by8) - /* call the aes main loop */ - do_aes_ctrmain KEY_128 1 - -SYM_FUNC_END(aes_xctr_enc_128_avx_by8) - -/* - * routine to do AES192 XCTR enc/decrypt "by8" - * XMM registers are clobbered. - * Saving/restoring must be done at a higher level - * aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv, const void *keys, - * u8* out, unsigned int num_bytes, unsigned int byte_ctr) - */ -SYM_FUNC_START(aes_xctr_enc_192_avx_by8) - /* call the aes main loop */ - do_aes_ctrmain KEY_192 1 - -SYM_FUNC_END(aes_xctr_enc_192_avx_by8) - -/* - * routine to do AES256 XCTR enc/decrypt "by8" - * XMM registers are clobbered. - * Saving/restoring must be done at a higher level - * aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv, const void *keys, - * u8* out, unsigned int num_bytes, unsigned int byte_ctr) - */ -SYM_FUNC_START(aes_xctr_enc_256_avx_by8) - /* call the aes main loop */ - do_aes_ctrmain KEY_256 1 - -SYM_FUNC_END(aes_xctr_enc_256_avx_by8) diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 3e9ab5cdade4..bc655d794a95 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -23,7 +23,6 @@ #include <linux/err.h> #include <crypto/algapi.h> #include <crypto/aes.h> -#include <crypto/ctr.h> #include <crypto/b128ops.h> #include <crypto/gcm.h> #include <crypto/xts.h> @@ -82,30 +81,8 @@ asmlinkage void aesni_xts_dec(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); #ifdef CONFIG_X86_64 - asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, const u8 *in, unsigned int len, u8 *iv); -DEFINE_STATIC_CALL(aesni_ctr_enc_tfm, aesni_ctr_enc); - -asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, - void *keys, u8 *out, unsigned int num_bytes); -asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, - void *keys, u8 *out, unsigned int num_bytes); -asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv, - void *keys, u8 *out, unsigned int num_bytes); - - -asmlinkage void aes_xctr_enc_128_avx_by8(const u8 *in, const u8 *iv, - const void *keys, u8 *out, unsigned int num_bytes, - unsigned int byte_ctr); - -asmlinkage void aes_xctr_enc_192_avx_by8(const u8 *in, const u8 *iv, - const void *keys, u8 *out, unsigned int num_bytes, - unsigned int byte_ctr); - -asmlinkage void aes_xctr_enc_256_avx_by8(const u8 *in, const u8 *iv, - const void *keys, u8 *out, unsigned int num_bytes, - unsigned int byte_ctr); #endif static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx) @@ -376,24 +353,8 @@ static int cts_cbc_decrypt(struct skcipher_request *req) } #ifdef CONFIG_X86_64 -static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, - const u8 *in, unsigned int len, u8 *iv) -{ - /* - * based on key length, override with the by8 version - * of ctr mode encryption/decryption for improved performance - * aes_set_key_common() ensures that key length is one of - * {128,192,256} - */ - if (ctx->key_length == AES_KEYSIZE_128) - aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len); - else if (ctx->key_length == AES_KEYSIZE_192) - aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len); - else - aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len); -} - -static int ctr_crypt(struct skcipher_request *req) +/* This is the non-AVX version. 
*/ +static int ctr_crypt_aesni(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); @@ -407,10 +368,9 @@ static int ctr_crypt(struct skcipher_request *req) while ((nbytes = walk.nbytes) > 0) { kernel_fpu_begin(); if (nbytes & AES_BLOCK_MASK) - static_call(aesni_ctr_enc_tfm)(ctx, walk.dst.virt.addr, - walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, - walk.iv); + aesni_ctr_enc(ctx, walk.dst.virt.addr, + walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); nbytes &= ~AES_BLOCK_MASK; if (walk.nbytes == walk.total && nbytes > 0) { @@ -426,59 +386,6 @@ static int ctr_crypt(struct skcipher_request *req) } return err; } - -static void aesni_xctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, - const u8 *in, unsigned int len, u8 *iv, - unsigned int byte_ctr) -{ - if (ctx->key_length == AES_KEYSIZE_128) - aes_xctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len, - byte_ctr); - else if (ctx->key_length == AES_KEYSIZE_192) - aes_xctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len, - byte_ctr); - else - aes_xctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len, - byte_ctr); -} - -static int xctr_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm)); - u8 keystream[AES_BLOCK_SIZE]; - struct skcipher_walk walk; - unsigned int nbytes; - unsigned int byte_ctr = 0; - int err; - __le32 block[AES_BLOCK_SIZE / sizeof(__le32)]; - - err = skcipher_walk_virt(&walk, req, false); - - while ((nbytes = walk.nbytes) > 0) { - kernel_fpu_begin(); - if (nbytes & AES_BLOCK_MASK) - aesni_xctr_enc_avx_tfm(ctx, walk.dst.virt.addr, - walk.src.virt.addr, nbytes & AES_BLOCK_MASK, - walk.iv, byte_ctr); - nbytes &= ~AES_BLOCK_MASK; - byte_ctr += walk.nbytes - nbytes; - - if (walk.nbytes == walk.total && nbytes > 0) { - memcpy(block, walk.iv, AES_BLOCK_SIZE); - block[0] ^= cpu_to_le32(1 + byte_ctr / AES_BLOCK_SIZE); - aesni_enc(ctx, keystream, (u8 *)block); - crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - - nbytes, walk.src.virt.addr + walk.nbytes - - nbytes, keystream, nbytes); - byte_ctr += nbytes; - nbytes = 0; - } - kernel_fpu_end(); - err = skcipher_walk_done(&walk, nbytes); - } - return err; -} #endif static int xts_setkey_aesni(struct crypto_skcipher *tfm, const u8 *key, @@ -581,11 +488,8 @@ xts_crypt(struct skcipher_request *req, xts_encrypt_iv_func encrypt_iv, { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm); - const unsigned int cryptlen = req->cryptlen; - struct scatterlist *src = req->src; - struct scatterlist *dst = req->dst; - if (unlikely(cryptlen < AES_BLOCK_SIZE)) + if (unlikely(req->cryptlen < AES_BLOCK_SIZE)) return -EINVAL; kernel_fpu_begin(); @@ -593,23 +497,16 @@ xts_crypt(struct skcipher_request *req, xts_encrypt_iv_func encrypt_iv, /* * In practice, virtually all XTS plaintexts and ciphertexts are either - * 512 or 4096 bytes, aligned such that they don't span page boundaries. - * To optimize the performance of these cases, and also any other case - * where no page boundary is spanned, the below fast-path handles - * single-page sources and destinations as efficiently as possible. + * 512 or 4096 bytes and do not use multiple scatterlist elements. To + * optimize the performance of these cases, the below fast-path handles + * single-scatterlist-element messages as efficiently as possible. 
The + * code is 64-bit specific, as it assumes no page mapping is needed. */ - if (likely(src->length >= cryptlen && dst->length >= cryptlen && - src->offset + cryptlen <= PAGE_SIZE && - dst->offset + cryptlen <= PAGE_SIZE)) { - struct page *src_page = sg_page(src); - struct page *dst_page = sg_page(dst); - void *src_virt = kmap_local_page(src_page) + src->offset; - void *dst_virt = kmap_local_page(dst_page) + dst->offset; - - (*crypt_func)(&ctx->crypt_ctx, src_virt, dst_virt, cryptlen, - req->iv); - kunmap_local(dst_virt); - kunmap_local(src_virt); + if (IS_ENABLED(CONFIG_X86_64) && + likely(req->src->length >= req->cryptlen && + req->dst->length >= req->cryptlen)) { + (*crypt_func)(&ctx->crypt_ctx, sg_virt(req->src), + sg_virt(req->dst), req->cryptlen, req->iv); kernel_fpu_end(); return 0; } @@ -731,8 +628,8 @@ static struct skcipher_alg aesni_skciphers[] = { .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, .setkey = aesni_skcipher_setkey, - .encrypt = ctr_crypt, - .decrypt = ctr_crypt, + .encrypt = ctr_crypt_aesni, + .decrypt = ctr_crypt_aesni, #endif }, { .base = { @@ -758,35 +655,105 @@ static struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)]; #ifdef CONFIG_X86_64 -/* - * XCTR does not have a non-AVX implementation, so it must be enabled - * conditionally. - */ -static struct skcipher_alg aesni_xctr = { - .base = { - .cra_name = "__xctr(aes)", - .cra_driver_name = "__xctr-aes-aesni", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_INTERNAL, - .cra_blocksize = 1, - .cra_ctxsize = CRYPTO_AES_CTX_SIZE, - .cra_module = THIS_MODULE, - }, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .chunksize = AES_BLOCK_SIZE, - .setkey = aesni_skcipher_setkey, - .encrypt = xctr_crypt, - .decrypt = xctr_crypt, -}; - -static struct simd_skcipher_alg *aesni_simd_xctr; - asmlinkage void aes_xts_encrypt_iv(const struct crypto_aes_ctx *tweak_key, u8 iv[AES_BLOCK_SIZE]); -#define DEFINE_XTS_ALG(suffix, driver_name, priority) \ +/* __always_inline to avoid indirect call */ +static __always_inline int +ctr_crypt(struct skcipher_request *req, + void (*ctr64_func)(const struct crypto_aes_ctx *key, + const u8 *src, u8 *dst, int len, + const u64 le_ctr[2])) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_aes_ctx *key = aes_ctx(crypto_skcipher_ctx(tfm)); + unsigned int nbytes, p1_nbytes, nblocks; + struct skcipher_walk walk; + u64 le_ctr[2]; + u64 ctr64; + int err; + + ctr64 = le_ctr[0] = get_unaligned_be64(&req->iv[8]); + le_ctr[1] = get_unaligned_be64(&req->iv[0]); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + if (nbytes < walk.total) { + /* Not the end yet, so keep the length block-aligned. */ + nbytes = round_down(nbytes, AES_BLOCK_SIZE); + nblocks = nbytes / AES_BLOCK_SIZE; + } else { + /* It's the end, so include any final partial block. */ + nblocks = DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE); + } + ctr64 += nblocks; + + kernel_fpu_begin(); + if (likely(ctr64 >= nblocks)) { + /* The low 64 bits of the counter won't overflow. */ + (*ctr64_func)(key, walk.src.virt.addr, + walk.dst.virt.addr, nbytes, le_ctr); + } else { + /* + * The low 64 bits of the counter will overflow. The + * assembly doesn't handle this case, so split the + * operation into two at the point where the overflow + * will occur. After the first part, add the carry bit. 
+ */ + p1_nbytes = min_t(unsigned int, nbytes, + (nblocks - ctr64) * AES_BLOCK_SIZE); + (*ctr64_func)(key, walk.src.virt.addr, + walk.dst.virt.addr, p1_nbytes, le_ctr); + le_ctr[0] = 0; + le_ctr[1]++; + (*ctr64_func)(key, walk.src.virt.addr + p1_nbytes, + walk.dst.virt.addr + p1_nbytes, + nbytes - p1_nbytes, le_ctr); + } + kernel_fpu_end(); + le_ctr[0] = ctr64; + + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } + + put_unaligned_be64(ctr64, &req->iv[8]); + put_unaligned_be64(le_ctr[1], &req->iv[0]); + + return err; +} + +/* __always_inline to avoid indirect call */ +static __always_inline int +xctr_crypt(struct skcipher_request *req, + void (*xctr_func)(const struct crypto_aes_ctx *key, + const u8 *src, u8 *dst, int len, + const u8 iv[AES_BLOCK_SIZE], u64 ctr)) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + const struct crypto_aes_ctx *key = aes_ctx(crypto_skcipher_ctx(tfm)); + struct skcipher_walk walk; + unsigned int nbytes; + u64 ctr = 1; + int err; + + err = skcipher_walk_virt(&walk, req, false); + while ((nbytes = walk.nbytes) != 0) { + if (nbytes < walk.total) + nbytes = round_down(nbytes, AES_BLOCK_SIZE); + + kernel_fpu_begin(); + (*xctr_func)(key, walk.src.virt.addr, walk.dst.virt.addr, + nbytes, req->iv, ctr); + kernel_fpu_end(); + + ctr += DIV_ROUND_UP(nbytes, AES_BLOCK_SIZE); + err = skcipher_walk_done(&walk, walk.nbytes - nbytes); + } + return err; +} + +#define DEFINE_AVX_SKCIPHER_ALGS(suffix, driver_name_suffix, priority) \ \ asmlinkage void \ aes_xts_encrypt_##suffix(const struct crypto_aes_ctx *key, const u8 *src, \ @@ -805,32 +772,80 @@ static int xts_decrypt_##suffix(struct skcipher_request *req) \ return xts_crypt(req, aes_xts_encrypt_iv, aes_xts_decrypt_##suffix); \ } \ \ -static struct skcipher_alg aes_xts_alg_##suffix = { \ - .base = { \ - .cra_name = "__xts(aes)", \ - .cra_driver_name = "__" driver_name, \ - .cra_priority = priority, \ - .cra_flags = CRYPTO_ALG_INTERNAL, \ - .cra_blocksize = AES_BLOCK_SIZE, \ - .cra_ctxsize = XTS_AES_CTX_SIZE, \ - .cra_module = THIS_MODULE, \ - }, \ - .min_keysize = 2 * AES_MIN_KEY_SIZE, \ - .max_keysize = 2 * AES_MAX_KEY_SIZE, \ - .ivsize = AES_BLOCK_SIZE, \ - .walksize = 2 * AES_BLOCK_SIZE, \ - .setkey = xts_setkey_aesni, \ - .encrypt = xts_encrypt_##suffix, \ - .decrypt = xts_decrypt_##suffix, \ -}; \ +asmlinkage void \ +aes_ctr64_crypt_##suffix(const struct crypto_aes_ctx *key, \ + const u8 *src, u8 *dst, int len, const u64 le_ctr[2]);\ + \ +static int ctr_crypt_##suffix(struct skcipher_request *req) \ +{ \ + return ctr_crypt(req, aes_ctr64_crypt_##suffix); \ +} \ + \ +asmlinkage void \ +aes_xctr_crypt_##suffix(const struct crypto_aes_ctx *key, \ + const u8 *src, u8 *dst, int len, \ + const u8 iv[AES_BLOCK_SIZE], u64 ctr); \ + \ +static int xctr_crypt_##suffix(struct skcipher_request *req) \ +{ \ + return xctr_crypt(req, aes_xctr_crypt_##suffix); \ +} \ \ -static struct simd_skcipher_alg *aes_xts_simdalg_##suffix +static struct skcipher_alg skcipher_algs_##suffix[] = {{ \ + .base.cra_name = "__xts(aes)", \ + .base.cra_driver_name = "__xts-aes-" driver_name_suffix, \ + .base.cra_priority = priority, \ + .base.cra_flags = CRYPTO_ALG_INTERNAL, \ + .base.cra_blocksize = AES_BLOCK_SIZE, \ + .base.cra_ctxsize = XTS_AES_CTX_SIZE, \ + .base.cra_module = THIS_MODULE, \ + .min_keysize = 2 * AES_MIN_KEY_SIZE, \ + .max_keysize = 2 * AES_MAX_KEY_SIZE, \ + .ivsize = AES_BLOCK_SIZE, \ + .walksize = 2 * AES_BLOCK_SIZE, \ + .setkey = xts_setkey_aesni, \ + .encrypt = xts_encrypt_##suffix, \ + .decrypt = 
xts_decrypt_##suffix, \ +}, { \ + .base.cra_name = "__ctr(aes)", \ + .base.cra_driver_name = "__ctr-aes-" driver_name_suffix, \ + .base.cra_priority = priority, \ + .base.cra_flags = CRYPTO_ALG_INTERNAL, \ + .base.cra_blocksize = 1, \ + .base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \ + .base.cra_module = THIS_MODULE, \ + .min_keysize = AES_MIN_KEY_SIZE, \ + .max_keysize = AES_MAX_KEY_SIZE, \ + .ivsize = AES_BLOCK_SIZE, \ + .chunksize = AES_BLOCK_SIZE, \ + .setkey = aesni_skcipher_setkey, \ + .encrypt = ctr_crypt_##suffix, \ + .decrypt = ctr_crypt_##suffix, \ +}, { \ + .base.cra_name = "__xctr(aes)", \ + .base.cra_driver_name = "__xctr-aes-" driver_name_suffix, \ + .base.cra_priority = priority, \ + .base.cra_flags = CRYPTO_ALG_INTERNAL, \ + .base.cra_blocksize = 1, \ + .base.cra_ctxsize = CRYPTO_AES_CTX_SIZE, \ + .base.cra_module = THIS_MODULE, \ + .min_keysize = AES_MIN_KEY_SIZE, \ + .max_keysize = AES_MAX_KEY_SIZE, \ + .ivsize = AES_BLOCK_SIZE, \ + .chunksize = AES_BLOCK_SIZE, \ + .setkey = aesni_skcipher_setkey, \ + .encrypt = xctr_crypt_##suffix, \ + .decrypt = xctr_crypt_##suffix, \ +}}; \ + \ +static struct simd_skcipher_alg * \ +simd_skcipher_algs_##suffix[ARRAY_SIZE(skcipher_algs_##suffix)] -DEFINE_XTS_ALG(aesni_avx, "xts-aes-aesni-avx", 500); +DEFINE_AVX_SKCIPHER_ALGS(aesni_avx, "aesni-avx", 500); #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) -DEFINE_XTS_ALG(vaes_avx2, "xts-aes-vaes-avx2", 600); -DEFINE_XTS_ALG(vaes_avx10_256, "xts-aes-vaes-avx10_256", 700); -DEFINE_XTS_ALG(vaes_avx10_512, "xts-aes-vaes-avx10_512", 800); +DEFINE_AVX_SKCIPHER_ALGS(vaes_avx2, "vaes-avx2", 600); +DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_256, "vaes-avx10_256", 700); +DEFINE_AVX_SKCIPHER_ALGS(vaes_avx10_512, "vaes-avx10_512", 800); #endif /* The common part of the x86_64 AES-GCM key struct */ @@ -1291,41 +1306,40 @@ static void gcm_process_assoc(const struct aes_gcm_key *key, u8 ghash_acc[16], scatterwalk_start(&walk, sg_src); while (assoclen) { - unsigned int len_this_page = scatterwalk_clamp(&walk, assoclen); - void *mapped = scatterwalk_map(&walk); - const void *src = mapped; + unsigned int orig_len_this_step = scatterwalk_next( + &walk, assoclen); + unsigned int len_this_step = orig_len_this_step; unsigned int len; + const u8 *src = walk.addr; - assoclen -= len_this_page; - scatterwalk_advance(&walk, len_this_page); if (unlikely(pos)) { - len = min(len_this_page, 16 - pos); + len = min(len_this_step, 16 - pos); memcpy(&buf[pos], src, len); pos += len; src += len; - len_this_page -= len; + len_this_step -= len; if (pos < 16) goto next; aes_gcm_aad_update(key, ghash_acc, buf, 16, flags); pos = 0; } - len = len_this_page; + len = len_this_step; if (unlikely(assoclen)) /* Not the last segment yet? 
*/ len = round_down(len, 16); aes_gcm_aad_update(key, ghash_acc, src, len, flags); src += len; - len_this_page -= len; - if (unlikely(len_this_page)) { - memcpy(buf, src, len_this_page); - pos = len_this_page; + len_this_step -= len; + if (unlikely(len_this_step)) { + memcpy(buf, src, len_this_step); + pos = len_this_step; } next: - scatterwalk_unmap(mapped); - scatterwalk_pagedone(&walk, 0, assoclen); + scatterwalk_done_src(&walk, orig_len_this_step); if (need_resched()) { kernel_fpu_end(); kernel_fpu_begin(); } + assoclen -= orig_len_this_step; } if (unlikely(pos)) aes_gcm_aad_update(key, ghash_acc, buf, pos, flags); @@ -1542,8 +1556,9 @@ static int __init register_avx_algs(void) if (!boot_cpu_has(X86_FEATURE_AVX)) return 0; - err = simd_register_skciphers_compat(&aes_xts_alg_aesni_avx, 1, - &aes_xts_simdalg_aesni_avx); + err = simd_register_skciphers_compat(skcipher_algs_aesni_avx, + ARRAY_SIZE(skcipher_algs_aesni_avx), + simd_skcipher_algs_aesni_avx); if (err) return err; err = simd_register_aeads_compat(aes_gcm_algs_aesni_avx, @@ -1551,6 +1566,12 @@ static int __init register_avx_algs(void) aes_gcm_simdalgs_aesni_avx); if (err) return err; + /* + * Note: not all the algorithms registered below actually require + * VPCLMULQDQ. But in practice every CPU with VAES also has VPCLMULQDQ. + * Similarly, the assembler support was added at about the same time. + * For simplicity, just always check for VAES and VPCLMULQDQ together. + */ #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) if (!boot_cpu_has(X86_FEATURE_AVX2) || !boot_cpu_has(X86_FEATURE_VAES) || @@ -1558,8 +1579,9 @@ static int __init register_avx_algs(void) !boot_cpu_has(X86_FEATURE_PCLMULQDQ) || !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) return 0; - err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx2, 1, - &aes_xts_simdalg_vaes_avx2); + err = simd_register_skciphers_compat(skcipher_algs_vaes_avx2, + ARRAY_SIZE(skcipher_algs_vaes_avx2), + simd_skcipher_algs_vaes_avx2); if (err) return err; @@ -1570,8 +1592,9 @@ static int __init register_avx_algs(void) XFEATURE_MASK_AVX512, NULL)) return 0; - err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx10_256, 1, - &aes_xts_simdalg_vaes_avx10_256); + err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_256, + ARRAY_SIZE(skcipher_algs_vaes_avx10_256), + simd_skcipher_algs_vaes_avx10_256); if (err) return err; err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_256, @@ -1583,13 +1606,15 @@ static int __init register_avx_algs(void) if (boot_cpu_has(X86_FEATURE_PREFER_YMM)) { int i; - aes_xts_alg_vaes_avx10_512.base.cra_priority = 1; + for (i = 0; i < ARRAY_SIZE(skcipher_algs_vaes_avx10_512); i++) + skcipher_algs_vaes_avx10_512[i].base.cra_priority = 1; for (i = 0; i < ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512); i++) aes_gcm_algs_vaes_avx10_512[i].base.cra_priority = 1; } - err = simd_register_skciphers_compat(&aes_xts_alg_vaes_avx10_512, 1, - &aes_xts_simdalg_vaes_avx10_512); + err = simd_register_skciphers_compat(skcipher_algs_vaes_avx10_512, + ARRAY_SIZE(skcipher_algs_vaes_avx10_512), + simd_skcipher_algs_vaes_avx10_512); if (err) return err; err = simd_register_aeads_compat(aes_gcm_algs_vaes_avx10_512, @@ -1603,27 +1628,31 @@ static int __init register_avx_algs(void) static void unregister_avx_algs(void) { - if (aes_xts_simdalg_aesni_avx) - simd_unregister_skciphers(&aes_xts_alg_aesni_avx, 1, - &aes_xts_simdalg_aesni_avx); + if (simd_skcipher_algs_aesni_avx[0]) + simd_unregister_skciphers(skcipher_algs_aesni_avx, + 
ARRAY_SIZE(skcipher_algs_aesni_avx), + simd_skcipher_algs_aesni_avx); if (aes_gcm_simdalgs_aesni_avx[0]) simd_unregister_aeads(aes_gcm_algs_aesni_avx, ARRAY_SIZE(aes_gcm_algs_aesni_avx), aes_gcm_simdalgs_aesni_avx); #if defined(CONFIG_AS_VAES) && defined(CONFIG_AS_VPCLMULQDQ) - if (aes_xts_simdalg_vaes_avx2) - simd_unregister_skciphers(&aes_xts_alg_vaes_avx2, 1, - &aes_xts_simdalg_vaes_avx2); - if (aes_xts_simdalg_vaes_avx10_256) - simd_unregister_skciphers(&aes_xts_alg_vaes_avx10_256, 1, - &aes_xts_simdalg_vaes_avx10_256); + if (simd_skcipher_algs_vaes_avx2[0]) + simd_unregister_skciphers(skcipher_algs_vaes_avx2, + ARRAY_SIZE(skcipher_algs_vaes_avx2), + simd_skcipher_algs_vaes_avx2); + if (simd_skcipher_algs_vaes_avx10_256[0]) + simd_unregister_skciphers(skcipher_algs_vaes_avx10_256, + ARRAY_SIZE(skcipher_algs_vaes_avx10_256), + simd_skcipher_algs_vaes_avx10_256); if (aes_gcm_simdalgs_vaes_avx10_256[0]) simd_unregister_aeads(aes_gcm_algs_vaes_avx10_256, ARRAY_SIZE(aes_gcm_algs_vaes_avx10_256), aes_gcm_simdalgs_vaes_avx10_256); - if (aes_xts_simdalg_vaes_avx10_512) - simd_unregister_skciphers(&aes_xts_alg_vaes_avx10_512, 1, - &aes_xts_simdalg_vaes_avx10_512); + if (simd_skcipher_algs_vaes_avx10_512[0]) + simd_unregister_skciphers(skcipher_algs_vaes_avx10_512, + ARRAY_SIZE(skcipher_algs_vaes_avx10_512), + simd_skcipher_algs_vaes_avx10_512); if (aes_gcm_simdalgs_vaes_avx10_512[0]) simd_unregister_aeads(aes_gcm_algs_vaes_avx10_512, ARRAY_SIZE(aes_gcm_algs_vaes_avx10_512), @@ -1656,13 +1685,6 @@ static int __init aesni_init(void) if (!x86_match_cpu(aesni_cpu_id)) return -ENODEV; -#ifdef CONFIG_X86_64 - if (boot_cpu_has(X86_FEATURE_AVX)) { - /* optimize performance of ctr mode encryption transform */ - static_call_update(aesni_ctr_enc_tfm, aesni_ctr_enc_avx_tfm); - pr_info("AES CTR mode by8 optimization enabled\n"); - } -#endif /* CONFIG_X86_64 */ err = crypto_register_alg(&aesni_cipher_alg); if (err) @@ -1680,14 +1702,6 @@ static int __init aesni_init(void) if (err) goto unregister_skciphers; -#ifdef CONFIG_X86_64 - if (boot_cpu_has(X86_FEATURE_AVX)) - err = simd_register_skciphers_compat(&aesni_xctr, 1, - &aesni_simd_xctr); - if (err) - goto unregister_aeads; -#endif /* CONFIG_X86_64 */ - err = register_avx_algs(); if (err) goto unregister_avx; @@ -1696,11 +1710,6 @@ static int __init aesni_init(void) unregister_avx: unregister_avx_algs(); -#ifdef CONFIG_X86_64 - if (aesni_simd_xctr) - simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr); -unregister_aeads: -#endif /* CONFIG_X86_64 */ simd_unregister_aeads(aes_gcm_algs_aesni, ARRAY_SIZE(aes_gcm_algs_aesni), aes_gcm_simdalgs_aesni); @@ -1720,10 +1729,6 @@ static void __exit aesni_exit(void) simd_unregister_skciphers(aesni_skciphers, ARRAY_SIZE(aesni_skciphers), aesni_simd_skciphers); crypto_unregister_alg(&aesni_cipher_alg); -#ifdef CONFIG_X86_64 - if (boot_cpu_has(X86_FEATURE_AVX)) - simd_unregister_skciphers(&aesni_xctr, 1, &aesni_simd_xctr); -#endif /* CONFIG_X86_64 */ unregister_avx_algs(); } diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c index 7b3a1cf0984b..8bb74a272879 100644 --- a/arch/x86/crypto/chacha_glue.c +++ b/arch/x86/crypto/chacha_glue.c @@ -133,12 +133,6 @@ void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) } EXPORT_SYMBOL(hchacha_block_arch); -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -{ - chacha_init_generic(state, key, iv); -} -EXPORT_SYMBOL(chacha_init_arch); - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) 
{ @@ -169,7 +163,7 @@ static int chacha_simd_stream_xor(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); - chacha_init_generic(state, ctx->key, iv); + chacha_init(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -211,7 +205,7 @@ static int xchacha_simd(struct skcipher_request *req) struct chacha_ctx subctx; u8 real_iv[16]; - chacha_init_generic(state, ctx->key, req->iv); + chacha_init(state, ctx->key, req->iv); if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { kernel_fpu_begin(); diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c index e88439d3828e..34600f90d8a6 100644 --- a/arch/x86/crypto/des3_ede_glue.c +++ b/arch/x86/crypto/des3_ede_glue.c @@ -73,7 +73,7 @@ static int ecb_crypt(struct skcipher_request *req, const u32 *expkey) err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes)) { - u8 *wsrc = walk.src.virt.addr; + const u8 *wsrc = walk.src.virt.addr; u8 *wdst = walk.dst.virt.addr; /* Process four block batch */ diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c index 41bc02e48916..c759ec808bf1 100644 --- a/arch/x86/crypto/ghash-clmulni-intel_glue.c +++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c @@ -189,6 +189,20 @@ static int ghash_async_init(struct ahash_request *req) return crypto_shash_init(desc); } +static void ghash_init_cryptd_req(struct ahash_request *req) +{ + struct ahash_request *cryptd_req = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); + struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; + + ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ahash_request_set_callback(cryptd_req, req->base.flags, + req->base.complete, req->base.data); + ahash_request_set_crypt(cryptd_req, req->src, req->result, + req->nbytes); +} + static int ghash_async_update(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); @@ -198,8 +212,7 @@ static int ghash_async_update(struct ahash_request *req) if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ghash_init_cryptd_req(req); return crypto_ahash_update(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); @@ -216,8 +229,7 @@ static int ghash_async_final(struct ahash_request *req) if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ghash_init_cryptd_req(req); return crypto_ahash_final(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); @@ -257,8 +269,7 @@ static int ghash_async_digest(struct ahash_request *req) if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { - memcpy(cryptd_req, req, sizeof(*req)); - ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); + ghash_init_cryptd_req(req); return crypto_ahash_digest(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); diff --git a/crypto/842.c b/crypto/842.c index e59e54d76960..5fb37a925989 100644 --- a/crypto/842.c +++ b/crypto/842.c @@ -18,17 +18,16 @@ * drivers/crypto/nx/nx-842-crypto.c */ +#include <crypto/internal/scompress.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/crypto.h> #include <linux/sw842.h> -#include 
<crypto/internal/scompress.h> struct crypto842_ctx { void *wmem; /* working memory for compress */ }; -static void *crypto842_alloc_ctx(struct crypto_scomp *tfm) +static void *crypto842_alloc_ctx(void) { void *ctx; @@ -39,38 +38,11 @@ static void *crypto842_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int crypto842_init(struct crypto_tfm *tfm) -{ - struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->wmem = crypto842_alloc_ctx(NULL); - if (IS_ERR(ctx->wmem)) - return -ENOMEM; - - return 0; -} - -static void crypto842_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void crypto842_free_ctx(void *ctx) { kfree(ctx); } -static void crypto842_exit(struct crypto_tfm *tfm) -{ - struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); - - crypto842_free_ctx(NULL, ctx->wmem); -} - -static int crypto842_compress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto842_ctx *ctx = crypto_tfm_ctx(tfm); - - return sw842_compress(src, slen, dst, dlen, ctx->wmem); -} - static int crypto842_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -78,13 +50,6 @@ static int crypto842_scompress(struct crypto_scomp *tfm, return sw842_compress(src, slen, dst, dlen, ctx); } -static int crypto842_decompress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - return sw842_decompress(src, slen, dst, dlen); -} - static int crypto842_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -92,20 +57,6 @@ static int crypto842_sdecompress(struct crypto_scomp *tfm, return sw842_decompress(src, slen, dst, dlen); } -static struct crypto_alg alg = { - .cra_name = "842", - .cra_driver_name = "842-generic", - .cra_priority = 100, - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct crypto842_ctx), - .cra_module = THIS_MODULE, - .cra_init = crypto842_init, - .cra_exit = crypto842_exit, - .cra_u = { .compress = { - .coa_compress = crypto842_compress, - .coa_decompress = crypto842_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = crypto842_alloc_ctx, .free_ctx = crypto842_free_ctx, @@ -121,25 +72,12 @@ static struct scomp_alg scomp = { static int __init crypto842_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } subsys_initcall(crypto842_mod_init); static void __exit crypto842_mod_exit(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } module_exit(crypto842_mod_exit); diff --git a/crypto/Kconfig b/crypto/Kconfig index 69bbe696b94d..dbf97c4e7c59 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -234,6 +234,18 @@ config CRYPTO_AUTHENC This is required for IPSec ESP (XFRM_ESP). +config CRYPTO_KRB5ENC + tristate "Kerberos 5 combined hash+cipher support" + select CRYPTO_AEAD + select CRYPTO_SKCIPHER + select CRYPTO_MANAGER + select CRYPTO_HASH + select CRYPTO_NULL + help + Combined hash and cipher support for Kerberos 5 RFC3961 simplified + profile. This is required for Kerberos 5-style encryption, used by + sunrpc/NFS and rxrpc/AFS. 
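The krb5enc template that this option builds pairs a hash with a cipher in the same manner as authenc(), except that the integrity checksum is calculated over the plaintext rather than the ciphertext, which is the arrangement the RFC3961 simplified profile calls for. A minimal sketch of instantiating it from kernel code follows; the template spelling "krb5enc(hmac(sha1),cts(cbc(aes)))" is an assumption inferred from the enctypes listed in the documentation, not something this hunk spells out, and the helper name is hypothetical:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/printk.h>

/*
 * Sketch only: allocate the combined hash+cipher AEAD provided by
 * CRYPTO_KRB5ENC and report its geometry.  The template string is an
 * assumption; the real users are the crypto/krb5/ routines added
 * later in this patch.
 */
static struct crypto_aead *krb5enc_alloc_sketch(void)
{
	struct crypto_aead *aead;

	aead = crypto_alloc_aead("krb5enc(hmac(sha1),cts(cbc(aes)))", 0, 0);
	if (IS_ERR(aead))
		return aead;

	pr_info("krb5enc: ivsize=%u maxauthsize=%u\n",
		crypto_aead_ivsize(aead), crypto_aead_maxauthsize(aead));
	return aead;	/* caller releases with crypto_free_aead() */
}

A key would then be installed with crypto_aead_setkey() before issuing any encrypt or decrypt requests.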
+ config CRYPTO_TEST tristate "Testing module" depends on m || EXPERT @@ -324,6 +336,7 @@ config CRYPTO_CURVE25519 tristate "Curve25519" select CRYPTO_KPP select CRYPTO_LIB_CURVE25519_GENERIC + select CRYPTO_LIB_CURVE25519_INTERNAL help Curve25519 elliptic curve (RFC7748) @@ -622,6 +635,7 @@ config CRYPTO_ARC4 config CRYPTO_CHACHA20 tristate "ChaCha" select CRYPTO_LIB_CHACHA_GENERIC + select CRYPTO_LIB_CHACHA_INTERNAL select CRYPTO_SKCIPHER help The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms @@ -943,6 +957,7 @@ config CRYPTO_POLY1305 tristate "Poly1305" select CRYPTO_HASH select CRYPTO_LIB_POLY1305_GENERIC + select CRYPTO_LIB_POLY1305_INTERNAL help Poly1305 authenticator algorithm (RFC7539) @@ -1446,5 +1461,6 @@ endif source "drivers/crypto/Kconfig" source "crypto/asymmetric_keys/Kconfig" source "certs/Kconfig" +source "crypto/krb5/Kconfig" endif # if CRYPTO diff --git a/crypto/Makefile b/crypto/Makefile index c95e95e75ad4..0e6ab5ffd3f7 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -4,7 +4,7 @@ # obj-$(CONFIG_CRYPTO) += crypto.o -crypto-y := api.o cipher.o compress.o +crypto-y := api.o cipher.o obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o obj-$(CONFIG_CRYPTO_FIPS) += fips.o @@ -157,6 +157,7 @@ obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o CFLAGS_crc32c_generic.o += -DARCH=$(ARCH) CFLAGS_crc32_generic.o += -DARCH=$(ARCH) obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o +obj-$(CONFIG_CRYPTO_KRB5ENC) += krb5enc.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o obj-$(CONFIG_CRYPTO_LZ4) += lz4.o obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o @@ -210,3 +211,5 @@ obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o # Key derivation function # obj-$(CONFIG_CRYPTO_KDF800108_CTR) += kdf_sp800108.o + +obj-$(CONFIG_CRYPTO_KRB5) += krb5/ diff --git a/crypto/acompress.c b/crypto/acompress.c index 6fdf0ff9f3c0..f7a3fbe5447e 100644 --- a/crypto/acompress.c +++ b/crypto/acompress.c @@ -12,6 +12,7 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/page-flags.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> @@ -23,6 +24,8 @@ struct crypto_scomp; static const struct crypto_type crypto_acomp_type; +static void acomp_reqchain_done(void *data, int err); + static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg) { return container_of(alg, struct acomp_alg, calg.base); @@ -58,29 +61,56 @@ static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm) struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); struct acomp_alg *alg = crypto_acomp_alg(acomp); - alg->exit(acomp); + if (alg->exit) + alg->exit(acomp); + + if (acomp_is_async(acomp)) + crypto_free_acomp(acomp->fb); } static int crypto_acomp_init_tfm(struct crypto_tfm *tfm) { struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm); struct acomp_alg *alg = crypto_acomp_alg(acomp); + struct crypto_acomp *fb = NULL; + int err; + + acomp->fb = acomp; if (tfm->__crt_alg->cra_type != &crypto_acomp_type) return crypto_init_scomp_ops_async(tfm); + if (acomp_is_async(acomp)) { + fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0, + CRYPTO_ALG_ASYNC); + if (IS_ERR(fb)) + return PTR_ERR(fb); + + err = -EINVAL; + if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE) + goto out_free_fb; + + acomp->fb = fb; + } + acomp->compress = alg->compress; acomp->decompress = alg->decompress; - acomp->dst_free = alg->dst_free; acomp->reqsize = alg->reqsize; - if (alg->exit) - acomp->base.exit = crypto_acomp_exit_tfm; + acomp->base.exit = crypto_acomp_exit_tfm; + + if (!alg->init) + 
return 0; - if (alg->init) - return alg->init(acomp); + err = alg->init(acomp); + if (err) + goto out_free_fb; return 0; + +out_free_fb: + crypto_free_acomp(fb); + return err; } static unsigned int crypto_acomp_extsize(struct crypto_alg *alg) @@ -123,35 +153,231 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type, } EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node); -struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) +static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt) { - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct acomp_req *req; + struct acomp_req_chain *state = &req->chain; + + state->compl = req->base.complete; + state->data = req->base.data; + req->base.complete = cplt; + req->base.data = state; + state->req0 = req; +} - req = __acomp_request_alloc(acomp); - if (req && (tfm->__crt_alg->cra_type != &crypto_acomp_type)) - return crypto_acomp_scomp_alloc_ctx(req); +static void acomp_restore_req(struct acomp_req *req) +{ + struct acomp_req_chain *state = req->base.data; - return req; + req->base.complete = state->compl; + req->base.data = state->data; } -EXPORT_SYMBOL_GPL(acomp_request_alloc); -void acomp_request_free(struct acomp_req *req) +static void acomp_reqchain_virt(struct acomp_req_chain *state, int err) { - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); + struct acomp_req *req = state->cur; + unsigned int slen = req->slen; + unsigned int dlen = req->dlen; + + req->base.err = err; + state = &req->chain; + + if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT) + acomp_request_set_src_dma(req, state->src, slen); + else if (state->flags & CRYPTO_ACOMP_REQ_SRC_FOLIO) + acomp_request_set_src_folio(req, state->sfolio, state->soff, slen); + if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT) + acomp_request_set_dst_dma(req, state->dst, dlen); + else if (state->flags & CRYPTO_ACOMP_REQ_DST_FOLIO) + acomp_request_set_dst_folio(req, state->dfolio, state->doff, dlen); +} - if (tfm->__crt_alg->cra_type != &crypto_acomp_type) - crypto_acomp_scomp_free_ctx(req); +static void acomp_virt_to_sg(struct acomp_req *req) +{ + struct acomp_req_chain *state = &req->chain; + + state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_SRC_FOLIO | + CRYPTO_ACOMP_REQ_DST_FOLIO); + + if (acomp_request_src_isvirt(req)) { + unsigned int slen = req->slen; + const u8 *svirt = req->svirt; + + state->src = svirt; + sg_init_one(&state->ssg, svirt, slen); + acomp_request_set_src_sg(req, &state->ssg, slen); + } else if (acomp_request_src_isfolio(req)) { + struct folio *folio = req->sfolio; + unsigned int slen = req->slen; + size_t off = req->soff; + + state->sfolio = folio; + state->soff = off; + sg_init_table(&state->ssg, 1); + sg_set_page(&state->ssg, folio_page(folio, off / PAGE_SIZE), + slen, off % PAGE_SIZE); + acomp_request_set_src_sg(req, &state->ssg, slen); + } - if (req->flags & CRYPTO_ACOMP_ALLOC_OUTPUT) { - acomp->dst_free(req->dst); - req->dst = NULL; + if (acomp_request_dst_isvirt(req)) { + unsigned int dlen = req->dlen; + u8 *dvirt = req->dvirt; + + state->dst = dvirt; + sg_init_one(&state->dsg, dvirt, dlen); + acomp_request_set_dst_sg(req, &state->dsg, dlen); + } else if (acomp_request_dst_isfolio(req)) { + struct folio *folio = req->dfolio; + unsigned int dlen = req->dlen; + size_t off = req->doff; + + state->dfolio = folio; + state->doff = off; + sg_init_table(&state->dsg, 1); + sg_set_page(&state->dsg, folio_page(folio, off / 
PAGE_SIZE), + dlen, off % PAGE_SIZE); + acomp_request_set_src_sg(req, &state->dsg, dlen); } +} + +static int acomp_do_nondma(struct acomp_req_chain *state, + struct acomp_req *req) +{ + u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_DST_NONDMA; + ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); + int err; + + acomp_request_set_callback(fbreq, req->base.flags, NULL, NULL); + fbreq->base.flags &= ~keep; + fbreq->base.flags |= req->base.flags & keep; + fbreq->src = req->src; + fbreq->dst = req->dst; + fbreq->slen = req->slen; + fbreq->dlen = req->dlen; + + if (state->op == crypto_acomp_reqtfm(req)->compress) + err = crypto_acomp_compress(fbreq); + else + err = crypto_acomp_decompress(fbreq); + + req->dlen = fbreq->dlen; + return err; +} + +static int acomp_do_one_req(struct acomp_req_chain *state, + struct acomp_req *req) +{ + state->cur = req; + + if (acomp_request_isnondma(req)) + return acomp_do_nondma(state, req); + + acomp_virt_to_sg(req); + return state->op(req); +} + +static int acomp_reqchain_finish(struct acomp_req *req0, int err, u32 mask) +{ + struct acomp_req_chain *state = req0->base.data; + struct acomp_req *req = state->cur; + struct acomp_req *n; + + acomp_reqchain_virt(state, err); + + if (req != req0) + list_add_tail(&req->base.list, &req0->base.list); - __acomp_request_free(req); + list_for_each_entry_safe(req, n, &state->head, base.list) { + list_del_init(&req->base.list); + + req->base.flags &= mask; + req->base.complete = acomp_reqchain_done; + req->base.data = state; + + err = acomp_do_one_req(state, req); + + if (err == -EINPROGRESS) { + if (!list_empty(&state->head)) + err = -EBUSY; + goto out; + } + + if (err == -EBUSY) + goto out; + + acomp_reqchain_virt(state, err); + list_add_tail(&req->base.list, &req0->base.list); + } + + acomp_restore_req(req0); + +out: + return err; +} + +static void acomp_reqchain_done(void *data, int err) +{ + struct acomp_req_chain *state = data; + crypto_completion_t compl = state->compl; + + data = state->data; + + if (err == -EINPROGRESS) { + if (!list_empty(&state->head)) + return; + goto notify; + } + + err = acomp_reqchain_finish(state->req0, err, + CRYPTO_TFM_REQ_MAY_BACKLOG); + if (err == -EBUSY) + return; + +notify: + compl(data, err); +} + +static int acomp_do_req_chain(struct acomp_req *req, + int (*op)(struct acomp_req *req)) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + struct acomp_req_chain *state; + int err; + + if (crypto_acomp_req_chain(tfm) || + (!acomp_request_chained(req) && acomp_request_issg(req))) + return op(req); + + acomp_save_req(req, acomp_reqchain_done); + state = req->base.data; + + state->op = op; + state->src = NULL; + INIT_LIST_HEAD(&state->head); + list_splice_init(&req->base.list, &state->head); + + err = acomp_do_one_req(state, req); + if (err == -EBUSY || err == -EINPROGRESS) + return -EBUSY; + + return acomp_reqchain_finish(req, err, ~0); +} + +int crypto_acomp_compress(struct acomp_req *req) +{ + return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress); +} +EXPORT_SYMBOL_GPL(crypto_acomp_compress); + +int crypto_acomp_decompress(struct acomp_req *req) +{ + return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress); } -EXPORT_SYMBOL_GPL(acomp_request_free); +EXPORT_SYMBOL_GPL(crypto_acomp_decompress); void comp_prepare_alg(struct comp_alg_common *alg) { diff --git a/crypto/aead.c b/crypto/aead.c index cade532413bf..12f5b42171af 100644 --- a/crypto/aead.c +++ b/crypto/aead.c @@ -16,6 +16,7 
@@ #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <net/netlink.h> #include "internal.h" @@ -156,8 +157,8 @@ static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg) struct aead_alg *aead = container_of(alg, struct aead_alg, base); seq_printf(m, "type : aead\n"); - seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? - "yes" : "no"); + seq_printf(m, "async : %s\n", + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "ivsize : %u\n", aead->ivsize); seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize); diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c index 6cbff298722b..72f6ee1345ef 100644 --- a/crypto/aegis128-core.c +++ b/crypto/aegis128-core.c @@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, scatterwalk_start(&walk, sg_src); while (assoclen != 0) { - unsigned int size = scatterwalk_clamp(&walk, assoclen); + unsigned int size = scatterwalk_next(&walk, assoclen); + const u8 *src = walk.addr; unsigned int left = size; - void *mapped = scatterwalk_map(&walk); - const u8 *src = (const u8 *)mapped; if (pos + size >= AEGIS_BLOCK_SIZE) { if (pos > 0) { @@ -308,9 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state, pos += left; assoclen -= size; - scatterwalk_unmap(mapped); - scatterwalk_advance(&walk, size); - scatterwalk_done(&walk, 0, assoclen); + scatterwalk_done_src(&walk, size); } if (pos > 0) { diff --git a/crypto/ahash.c b/crypto/ahash.c index b08b89ec26ec..9f57b925b116 100644 --- a/crypto/ahash.c +++ b/crypto/ahash.c @@ -16,11 +16,13 @@ #include <linux/cryptouser.h> #include <linux/err.h> #include <linux/kernel.h> +#include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <net/netlink.h> #include "hash.h" @@ -28,7 +30,7 @@ #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e struct crypto_hash_walk { - char *data; + const char *data; unsigned int offset; unsigned int flags; @@ -40,6 +42,27 @@ struct crypto_hash_walk { struct scatterlist *sg; }; +struct ahash_save_req_state { + struct list_head head; + struct ahash_request *req0; + struct ahash_request *cur; + int (*op)(struct ahash_request *req); + crypto_completion_t compl; + void *data; + struct scatterlist sg; + const u8 *src; + u8 *page; + unsigned int offset; + unsigned int nbytes; +}; + +static void ahash_reqchain_done(void *data, int err); +static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt); +static void ahash_restore_req(struct ahash_request *req); +static void ahash_def_finup_done1(void *data, int err); +static int ahash_def_finup_finish1(struct ahash_request *req, int err); +static int ahash_def_finup(struct ahash_request *req); + static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; @@ -58,7 +81,7 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk) sg = walk->sg; walk->offset = sg->offset; - walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); + walk->pg = nth_page(sg_page(walk->sg), (walk->offset >> PAGE_SHIFT)); walk->offset = offset_in_page(walk->offset); walk->entrylen = sg->length; @@ -73,20 +96,29 @@ static int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk) { walk->total = req->nbytes; + walk->entrylen = 0; - if (!walk->total) { - 
walk->entrylen = 0; + if (!walk->total) return 0; + + walk->flags = req->base.flags; + + if (ahash_request_isvirt(req)) { + walk->data = req->svirt; + walk->total = 0; + return req->nbytes; } walk->sg = req->src; - walk->flags = req->base.flags; return hash_walk_new_entry(walk); } static int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { + if ((walk->flags & CRYPTO_AHASH_REQ_VIRT)) + return err; + walk->data -= walk->offset; kunmap_local(walk->data); @@ -171,21 +203,36 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) unsigned int nbytes = req->nbytes; struct scatterlist *sg; unsigned int offset; + struct page *page; + const u8 *data; int err; - if (nbytes && - (sg = req->src, offset = sg->offset, - nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { - void *data; + data = req->svirt; + if (!nbytes || ahash_request_isvirt(req)) + return crypto_shash_digest(desc, data, nbytes, req->result); + + sg = req->src; + if (nbytes > sg->length) + return crypto_shash_init(desc) ?: + shash_ahash_finup(req, desc); + + page = sg_page(sg); + offset = sg->offset; + data = lowmem_page_address(page) + offset; + if (!IS_ENABLED(CONFIG_HIGHMEM)) + return crypto_shash_digest(desc, data, nbytes, req->result); + + page = nth_page(page, offset >> PAGE_SHIFT); + offset = offset_in_page(offset); - data = kmap_local_page(sg_page(sg)); - err = crypto_shash_digest(desc, data + offset, nbytes, - req->result); - kunmap_local(data); - } else - err = crypto_shash_init(desc) ?: - shash_ahash_finup(req, desc); + if (nbytes > (unsigned int)PAGE_SIZE - offset) + return crypto_shash_init(desc) ?: + shash_ahash_finup(req, desc); + data = kmap_local_page(page); + err = crypto_shash_digest(desc, data + offset, nbytes, + req->result); + kunmap_local(data); return err; } EXPORT_SYMBOL_GPL(shash_ahash_digest); @@ -266,89 +313,330 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); +static bool ahash_request_hasvirt(struct ahash_request *req) +{ + struct ahash_request *r2; + + if (ahash_request_isvirt(req)) + return true; + + list_for_each_entry(r2, &req->base.list, base.list) + if (ahash_request_isvirt(r2)) + return true; + + return false; +} + +static int ahash_reqchain_virt(struct ahash_save_req_state *state, + int err, u32 mask) +{ + struct ahash_request *req = state->cur; + + for (;;) { + unsigned len = state->nbytes; + + req->base.err = err; + + if (!state->offset) + break; + + if (state->offset == len || err) { + u8 *result = req->result; + + ahash_request_set_virt(req, state->src, result, len); + state->offset = 0; + break; + } + + len -= state->offset; + + len = min(PAGE_SIZE, len); + memcpy(state->page, state->src + state->offset, len); + state->offset += len; + req->nbytes = len; + + err = state->op(req); + if (err == -EINPROGRESS) { + if (!list_empty(&state->head) || + state->offset < state->nbytes) + err = -EBUSY; + break; + } + + if (err == -EBUSY) + break; + } + + return err; +} + +static int ahash_reqchain_finish(struct ahash_request *req0, + struct ahash_save_req_state *state, + int err, u32 mask) +{ + struct ahash_request *req = state->cur; + struct crypto_ahash *tfm; + struct ahash_request *n; + bool update; + u8 *page; + + err = ahash_reqchain_virt(state, err, mask); + if (err == -EINPROGRESS || err == -EBUSY) + goto out; + + if (req != req0) + list_add_tail(&req->base.list, &req0->base.list); + + tfm = crypto_ahash_reqtfm(req); + update = state->op == crypto_ahash_alg(tfm)->update; + + 
list_for_each_entry_safe(req, n, &state->head, base.list) { + list_del_init(&req->base.list); + + req->base.flags &= mask; + req->base.complete = ahash_reqchain_done; + req->base.data = state; + state->cur = req; + + if (update && ahash_request_isvirt(req) && req->nbytes) { + unsigned len = req->nbytes; + u8 *result = req->result; + + state->src = req->svirt; + state->nbytes = len; + + len = min(PAGE_SIZE, len); + + memcpy(state->page, req->svirt, len); + state->offset = len; + + ahash_request_set_crypt(req, &state->sg, result, len); + } + + err = state->op(req); + + if (err == -EINPROGRESS) { + if (!list_empty(&state->head) || + state->offset < state->nbytes) + err = -EBUSY; + goto out; + } + + if (err == -EBUSY) + goto out; + + err = ahash_reqchain_virt(state, err, mask); + if (err == -EINPROGRESS || err == -EBUSY) + goto out; + + list_add_tail(&req->base.list, &req0->base.list); + } + + page = state->page; + if (page) { + memset(page, 0, PAGE_SIZE); + free_page((unsigned long)page); + } + ahash_restore_req(req0); + +out: + return err; +} + +static void ahash_reqchain_done(void *data, int err) +{ + struct ahash_save_req_state *state = data; + crypto_completion_t compl = state->compl; + + data = state->data; + + if (err == -EINPROGRESS) { + if (!list_empty(&state->head) || state->offset < state->nbytes) + return; + goto notify; + } + + err = ahash_reqchain_finish(state->req0, state, err, + CRYPTO_TFM_REQ_MAY_BACKLOG); + if (err == -EBUSY) + return; + +notify: + compl(data, err); +} + +static int ahash_do_req_chain(struct ahash_request *req, + int (*op)(struct ahash_request *req)) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + bool update = op == crypto_ahash_alg(tfm)->update; + struct ahash_save_req_state *state; + struct ahash_save_req_state state0; + struct ahash_request *r2; + u8 *page = NULL; + int err; + + if (crypto_ahash_req_chain(tfm) || + (!ahash_request_chained(req) && + (!update || !ahash_request_isvirt(req)))) + return op(req); + + if (update && ahash_request_hasvirt(req)) { + gfp_t gfp; + u32 flags; + + flags = ahash_request_flags(req); + gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
+ GFP_KERNEL : GFP_ATOMIC; + page = (void *)__get_free_page(gfp); + err = -ENOMEM; + if (!page) + goto out_set_chain; + } + + state = &state0; + if (ahash_is_async(tfm)) { + err = ahash_save_req(req, ahash_reqchain_done); + if (err) + goto out_free_page; + + state = req->base.data; + } + + state->op = op; + state->cur = req; + state->page = page; + state->offset = 0; + state->nbytes = 0; + INIT_LIST_HEAD(&state->head); + list_splice_init(&req->base.list, &state->head); + + if (page) + sg_init_one(&state->sg, page, PAGE_SIZE); + + if (update && ahash_request_isvirt(req) && req->nbytes) { + unsigned len = req->nbytes; + u8 *result = req->result; + + state->src = req->svirt; + state->nbytes = len; + + len = min(PAGE_SIZE, len); + + memcpy(page, req->svirt, len); + state->offset = len; + + ahash_request_set_crypt(req, &state->sg, result, len); + } + + err = op(req); + if (err == -EBUSY || err == -EINPROGRESS) + return -EBUSY; + + return ahash_reqchain_finish(req, state, err, ~0); + +out_free_page: + free_page((unsigned long)page); + +out_set_chain: + req->base.err = err; + list_for_each_entry(r2, &req->base.list, base.list) + r2->base.err = err; + + return err; +} + int crypto_ahash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return crypto_shash_init(prepare_shash_desc(req, tfm)); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = crypto_shash_init(prepare_shash_desc(req, tfm)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = prepare_shash_desc(r2, tfm); + r2->base.err = crypto_shash_init(desc); + } + + return err; + } + if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return crypto_ahash_alg(tfm)->init(req); + + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init); } EXPORT_SYMBOL_GPL(crypto_ahash_init); -static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, - bool has_state) +static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - unsigned int ds = crypto_ahash_digestsize(tfm); - struct ahash_request *subreq; - unsigned int subreq_size; - unsigned int reqsize; - u8 *result; + struct ahash_save_req_state *state; gfp_t gfp; u32 flags; - subreq_size = sizeof(*subreq); - reqsize = crypto_ahash_reqsize(tfm); - reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment()); - subreq_size += reqsize; - subreq_size += ds; + if (!ahash_is_async(tfm)) + return 0; flags = ahash_request_flags(req); gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; - subreq = kmalloc(subreq_size, gfp); - if (!subreq) + state = kmalloc(sizeof(*state), gfp); + if (!state) return -ENOMEM; - ahash_request_set_tfm(subreq, tfm); - ahash_request_set_callback(subreq, flags, cplt, req); - - result = (u8 *)(subreq + 1) + reqsize; - - ahash_request_set_crypt(subreq, req->src, result, req->nbytes); - - if (has_state) { - void *state; - - state = kmalloc(crypto_ahash_statesize(tfm), gfp); - if (!state) { - kfree(subreq); - return -ENOMEM; - } - - crypto_ahash_export(req, state); - crypto_ahash_import(subreq, state); - kfree_sensitive(state); - } - - req->priv = subreq; + state->compl = req->base.complete; + state->data = req->base.data; + req->base.complete = cplt; + req->base.data = state; + state->req0 = req; return 0; } -static void ahash_restore_req(struct ahash_request *req, int err) +static void ahash_restore_req(struct ahash_request *req) { - struct ahash_request *subreq = req->priv; + struct ahash_save_req_state *state; + struct crypto_ahash *tfm; - if (!err) - memcpy(req->result, subreq->result, - crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); + tfm = crypto_ahash_reqtfm(req); + if (!ahash_is_async(tfm)) + return; - req->priv = NULL; + state = req->base.data; - kfree_sensitive(subreq); + req->base.complete = state->compl; + req->base.data = state->data; + kfree(state); } int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return shash_ahash_update(req, ahash_request_ctx(req)); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = shash_ahash_update(req, ahash_request_ctx(req)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = ahash_request_ctx(r2); + r2->base.err = shash_ahash_update(r2, desc); + } - return crypto_ahash_alg(tfm)->update(req); + return err; + } + + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update); } EXPORT_SYMBOL_GPL(crypto_ahash_update); @@ -356,10 +644,24 @@ int crypto_ahash_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return crypto_shash_final(ahash_request_ctx(req), req->result); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; - return crypto_ahash_alg(tfm)->final(req); + err = crypto_shash_final(ahash_request_ctx(req), req->result); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = ahash_request_ctx(r2); + r2->base.err = crypto_shash_final(desc, r2->result); + } + + return err; + } + + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final); } EXPORT_SYMBOL_GPL(crypto_ahash_final); @@ -367,86 +669,182 @@ int crypto_ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return shash_ahash_finup(req, ahash_request_ctx(req)); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = shash_ahash_finup(req, ahash_request_ctx(req)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = ahash_request_ctx(r2); + r2->base.err = shash_ahash_finup(r2, desc); + } + + return err; + } - return crypto_ahash_alg(tfm)->finup(req); + if (!crypto_ahash_alg(tfm)->finup || + (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req))) + return ahash_def_finup(req); + + return 
ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); +static int ahash_def_digest_finish(struct ahash_request *req, int err) +{ + struct crypto_ahash *tfm; + + if (err) + goto out; + + tfm = crypto_ahash_reqtfm(req); + if (ahash_is_async(tfm)) + req->base.complete = ahash_def_finup_done1; + + err = crypto_ahash_update(req); + if (err == -EINPROGRESS || err == -EBUSY) + return err; + + return ahash_def_finup_finish1(req, err); + +out: + ahash_restore_req(req); + return err; +} + +static void ahash_def_digest_done(void *data, int err) +{ + struct ahash_save_req_state *state0 = data; + struct ahash_save_req_state state; + struct ahash_request *areq; + + state = *state0; + areq = state.req0; + if (err == -EINPROGRESS) + goto out; + + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + err = ahash_def_digest_finish(areq, err); + if (err == -EINPROGRESS || err == -EBUSY) + return; + +out: + state.compl(state.data, err); +} + +static int ahash_def_digest(struct ahash_request *req) +{ + int err; + + err = ahash_save_req(req, ahash_def_digest_done); + if (err) + return err; + + err = crypto_ahash_init(req); + if (err == -EINPROGRESS || err == -EBUSY) + return err; + + return ahash_def_digest_finish(req, err); +} + int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - if (likely(tfm->using_shash)) - return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + if (likely(tfm->using_shash)) { + struct ahash_request *r2; + int err; + + err = shash_ahash_digest(req, prepare_shash_desc(req, tfm)); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) { + struct shash_desc *desc; + + desc = prepare_shash_desc(r2, tfm); + r2->base.err = shash_ahash_digest(r2, desc); + } + + return err; + } + + if (!crypto_ahash_req_chain(tfm) && ahash_request_hasvirt(req)) + return ahash_def_digest(req); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; - return crypto_ahash_alg(tfm)->digest(req); + return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); static void ahash_def_finup_done2(void *data, int err) { - struct ahash_request *areq = data; + struct ahash_save_req_state *state = data; + struct ahash_request *areq = state->req0; if (err == -EINPROGRESS) return; - ahash_restore_req(areq, err); - + ahash_restore_req(areq); ahash_request_complete(areq, err); } static int ahash_def_finup_finish1(struct ahash_request *req, int err) { - struct ahash_request *subreq = req->priv; + struct crypto_ahash *tfm; if (err) goto out; - subreq->base.complete = ahash_def_finup_done2; + tfm = crypto_ahash_reqtfm(req); + if (ahash_is_async(tfm)) + req->base.complete = ahash_def_finup_done2; - err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq); + err = crypto_ahash_final(req); if (err == -EINPROGRESS || err == -EBUSY) return err; out: - ahash_restore_req(req, err); + ahash_restore_req(req); return err; } static void ahash_def_finup_done1(void *data, int err) { - struct ahash_request *areq = data; - struct ahash_request *subreq; + struct ahash_save_req_state *state0 = data; + struct ahash_save_req_state state; + struct ahash_request *areq; + state = *state0; + areq = state.req0; if (err == -EINPROGRESS) goto out; - subreq = areq->priv; - subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; + areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; err = ahash_def_finup_finish1(areq, err); if (err == -EINPROGRESS || err == -EBUSY) return; out: - 
ahash_request_complete(areq, err); + state.compl(state.data, err); } static int ahash_def_finup(struct ahash_request *req) { - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int err; - err = ahash_save_req(req, ahash_def_finup_done1, true); + err = ahash_save_req(req, ahash_def_finup_done1); if (err) return err; - err = crypto_ahash_alg(tfm)->update(req->priv); + err = crypto_ahash_update(req); if (err == -EINPROGRESS || err == -EBUSY) return err; @@ -489,6 +887,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm) struct ahash_alg *alg = crypto_ahash_alg(hash); crypto_ahash_set_statesize(hash, alg->halg.statesize); + crypto_ahash_set_reqsize(hash, alg->reqsize); if (tfm->__crt_alg->cra_type == &crypto_shash_type) return crypto_init_ahash_using_shash(tfm); @@ -536,8 +935,8 @@ static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg) { seq_printf(m, "type : ahash\n"); - seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ? - "yes" : "no"); + seq_printf(m, "async : %s\n", + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "digestsize : %u\n", __crypto_hash_alg_common(alg)->digestsize); @@ -654,6 +1053,9 @@ static int ahash_prepare_alg(struct ahash_alg *alg) if (alg->halg.statesize == 0) return -EINVAL; + if (alg->reqsize && alg->reqsize < alg->halg.statesize) + return -EINVAL; + err = hash_prepare_alg(&alg->halg); if (err) return err; @@ -661,8 +1063,6 @@ static int ahash_prepare_alg(struct ahash_alg *alg) base->cra_type = &crypto_ahash_type; base->cra_flags |= CRYPTO_ALG_TYPE_AHASH; - if (!alg->finup) - alg->finup = ahash_def_finup; if (!alg->setkey) alg->setkey = ahash_nosetkey; @@ -733,5 +1133,20 @@ int ahash_register_instance(struct crypto_template *tmpl, } EXPORT_SYMBOL_GPL(ahash_register_instance); +void ahash_request_free(struct ahash_request *req) +{ + struct ahash_request *tmp; + struct ahash_request *r2; + + if (unlikely(!req)) + return; + + list_for_each_entry_safe(r2, tmp, &req->base.list, base.list) + kfree_sensitive(r2); + + kfree_sensitive(req); +} +EXPORT_SYMBOL_GPL(ahash_request_free); + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Asynchronous cryptographic hash type"); diff --git a/crypto/algapi.c b/crypto/algapi.c index 5318c214debb..ea9ed9580aa8 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -464,8 +464,7 @@ void crypto_unregister_alg(struct crypto_alg *alg) if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1)) return; - if (alg->cra_destroy) - alg->cra_destroy(alg); + crypto_alg_put(alg); crypto_remove_final(&list); } @@ -955,7 +954,7 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) queue->backlog = queue->backlog->next; request = queue->list.next; - list_del(request); + list_del_init(request); return list_entry(request, struct crypto_async_request, list); } diff --git a/crypto/api.c b/crypto/api.c index bfd177a4313a..3416e98128a0 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -36,7 +36,8 @@ EXPORT_SYMBOL_GPL(crypto_chain); DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished); #endif -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask); static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type, u32 mask); @@ -145,7 +146,7 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, if (alg != &larval->alg) { 
kfree(larval); if (crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); } return alg; @@ -197,7 +198,8 @@ static void crypto_start_test(struct crypto_larval *larval) crypto_schedule_test(larval); } -static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg) +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg, + u32 type, u32 mask) { struct crypto_larval *larval; long time_left; @@ -219,12 +221,7 @@ again: crypto_larval_kill(larval); alg = ERR_PTR(-ETIMEDOUT); } else if (!alg) { - u32 type; - u32 mask; - alg = &larval->alg; - type = alg->cra_flags & ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD); - mask = larval->mask; alg = crypto_alg_lookup(alg->cra_name, type, mask) ?: ERR_PTR(-EAGAIN); } else if (IS_ERR(alg)) @@ -304,7 +301,7 @@ static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, } if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg)) - alg = crypto_larval_wait(alg); + alg = crypto_larval_wait(alg, type, mask); else if (alg) ; else if (!(mask & CRYPTO_ALG_TESTED)) @@ -352,7 +349,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask) ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval); if (ok == NOTIFY_STOP) - alg = crypto_larval_wait(larval); + alg = crypto_larval_wait(larval, type, mask); else { crypto_mod_put(larval); alg = ERR_PTR(-ENOENT); @@ -386,10 +383,6 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask) case CRYPTO_ALG_TYPE_CIPHER: len += crypto_cipher_ctxsize(alg); break; - - case CRYPTO_ALG_TYPE_COMPRESS: - len += crypto_compress_ctxsize(alg); - break; } return len; @@ -710,5 +703,15 @@ void crypto_req_done(void *data, int err) } EXPORT_SYMBOL_GPL(crypto_req_done); +void crypto_destroy_alg(struct crypto_alg *alg) +{ + if (alg->cra_type && alg->cra_type->destroy) + alg->cra_type->destroy(alg); + + if (alg->cra_destroy) + alg->cra_destroy(alg); +} +EXPORT_SYMBOL_GPL(crypto_destroy_alg); + MODULE_DESCRIPTION("Cryptographic core API"); MODULE_LICENSE("GPL"); diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index bbd07a9022e6..bf165d321440 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -267,7 +267,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params, struct crypto_sig *sig; char *key, *ptr; bool issig; - int ksz; int ret; pr_devel("==>%s()\n", __func__); @@ -302,8 +301,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params, ret = crypto_sig_set_pubkey(sig, key, pkey->keylen); if (ret) goto error_free_tfm; - - ksz = crypto_sig_keysize(sig); } else { tfm = crypto_alloc_akcipher(alg_name, 0, 0); if (IS_ERR(tfm)) { @@ -317,8 +314,6 @@ static int software_key_eds_op(struct kernel_pkey_params *params, ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen); if (ret) goto error_free_tfm; - - ksz = crypto_akcipher_maxsize(tfm); } ret = -EINVAL; @@ -347,8 +342,8 @@ static int software_key_eds_op(struct kernel_pkey_params *params, BUG(); } - if (ret == 0) - ret = ksz; + if (!issig && ret == 0) + ret = crypto_akcipher_maxsize(tfm); error_free_tfm: if (issig) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 1a3855284091..2c499654a36c 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -389,32 +389,6 @@ async_xor_val_offs(struct page *dest, unsigned int offset, } EXPORT_SYMBOL_GPL(async_xor_val_offs); -/** - * async_xor_val - attempt a xor parity check with a dma engine. 
- * @dest: destination page used if the xor is performed synchronously - * @src_list: array of source pages - * @offset: offset in pages to start transaction - * @src_cnt: number of source pages - * @len: length in bytes - * @result: 0 if sum == 0 else non-zero - * @submit: submission / completion modifiers - * - * honored flags: ASYNC_TX_ACK - * - * src_list note: if the dest is also a source it must be at index zero. - * The contents of this array will be overwritten if a scribble region - * is not specified. - */ -struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit) -{ - return async_xor_val_offs(dest, offset, src_list, NULL, src_cnt, - len, result, submit); -} -EXPORT_SYMBOL_GPL(async_xor_val); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); MODULE_LICENSE("GPL"); diff --git a/crypto/bpf_crypto_skcipher.c b/crypto/bpf_crypto_skcipher.c index b5e657415770..a88798d3e8c8 100644 --- a/crypto/bpf_crypto_skcipher.c +++ b/crypto/bpf_crypto_skcipher.c @@ -80,3 +80,4 @@ static void __exit bpf_crypto_skcipher_exit(void) module_init(bpf_crypto_skcipher_init); module_exit(bpf_crypto_skcipher_exit); MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Symmetric key cipher support for BPF"); diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c index ba7fcb47f9aa..1fb9fbd302c6 100644 --- a/crypto/chacha_generic.c +++ b/crypto/chacha_generic.c @@ -21,7 +21,7 @@ static int chacha_stream_xor(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); - chacha_init_generic(state, ctx->key, iv); + chacha_init(state, ctx->key, iv); while (walk.nbytes > 0) { unsigned int nbytes = walk.nbytes; @@ -54,7 +54,7 @@ static int crypto_xchacha_crypt(struct skcipher_request *req) u8 real_iv[16]; /* Compute the subkey given the original key and first 128 nonce bits */ - chacha_init_generic(state, ctx->key, req->iv); + chacha_init(state, ctx->key, req->iv); hchacha_block_generic(state, subctx.key, ctx->nrounds); subctx.nrounds = ctx->nrounds; diff --git a/crypto/compress.c b/crypto/compress.c deleted file mode 100644 index 9048fe390c46..000000000000 --- a/crypto/compress.c +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API. - * - * Compression operations. 
- * - * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> - */ -#include <linux/crypto.h> -#include "internal.h" - -int crypto_comp_compress(struct crypto_comp *comp, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto_tfm *tfm = crypto_comp_tfm(comp); - - return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst, - dlen); -} -EXPORT_SYMBOL_GPL(crypto_comp_compress); - -int crypto_comp_decompress(struct crypto_comp *comp, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct crypto_tfm *tfm = crypto_comp_tfm(comp); - - return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst, - dlen); -} -EXPORT_SYMBOL_GPL(crypto_comp_decompress); diff --git a/crypto/compress.h b/crypto/compress.h index c3cedfb5e606..f7737a1fcbbd 100644 --- a/crypto/compress.h +++ b/crypto/compress.h @@ -15,8 +15,6 @@ struct acomp_req; struct comp_alg_common; int crypto_init_scomp_ops_async(struct crypto_tfm *tfm); -struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req); -void crypto_acomp_scomp_free_ctx(struct acomp_req *req); void comp_prepare_alg(struct comp_alg_common *alg); diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 5b84b0f7cc17..ced90f88ee07 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -17,23 +17,13 @@ #include <crypto/internal/skcipher.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/mm.h> +#include <linux/spinlock.h> #include <linux/string.h> -static DEFINE_MUTEX(crypto_default_null_skcipher_lock); +static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock); static struct crypto_sync_skcipher *crypto_default_null_skcipher; static int crypto_default_null_skcipher_refcnt; -static int null_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - if (slen > *dlen) - return -EINVAL; - memcpy(dst, src, slen); - *dlen = slen; - return 0; -} - static int null_init(struct shash_desc *desc) { return 0; @@ -121,7 +111,7 @@ static struct skcipher_alg skcipher_null = { .decrypt = null_skcipher_crypt, }; -static struct crypto_alg null_algs[] = { { +static struct crypto_alg cipher_null = { .cra_name = "cipher_null", .cra_driver_name = "cipher_null-generic", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, @@ -134,41 +124,39 @@ static struct crypto_alg null_algs[] = { { .cia_setkey = null_setkey, .cia_encrypt = null_crypt, .cia_decrypt = null_crypt } } -}, { - .cra_name = "compress_null", - .cra_driver_name = "compress_null-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_blocksize = NULL_BLOCK_SIZE, - .cra_ctxsize = 0, - .cra_module = THIS_MODULE, - .cra_u = { .compress = { - .coa_compress = null_compress, - .coa_decompress = null_compress } } -} }; +}; -MODULE_ALIAS_CRYPTO("compress_null"); MODULE_ALIAS_CRYPTO("digest_null"); MODULE_ALIAS_CRYPTO("cipher_null"); struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void) { + struct crypto_sync_skcipher *ntfm = NULL; struct crypto_sync_skcipher *tfm; - mutex_lock(&crypto_default_null_skcipher_lock); + spin_lock_bh(&crypto_default_null_skcipher_lock); tfm = crypto_default_null_skcipher; if (!tfm) { - tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); - if (IS_ERR(tfm)) - goto unlock; - - crypto_default_null_skcipher = tfm; + spin_unlock_bh(&crypto_default_null_skcipher_lock); + + ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); + if (IS_ERR(ntfm)) + return ntfm; + + spin_lock_bh(&crypto_default_null_skcipher_lock); + tfm = 
crypto_default_null_skcipher; + if (!tfm) { + tfm = ntfm; + ntfm = NULL; + crypto_default_null_skcipher = tfm; + } } crypto_default_null_skcipher_refcnt++; + spin_unlock_bh(&crypto_default_null_skcipher_lock); -unlock: - mutex_unlock(&crypto_default_null_skcipher_lock); + crypto_free_sync_skcipher(ntfm); return tfm; } @@ -176,12 +164,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); void crypto_put_default_null_skcipher(void) { - mutex_lock(&crypto_default_null_skcipher_lock); + struct crypto_sync_skcipher *tfm = NULL; + + spin_lock_bh(&crypto_default_null_skcipher_lock); if (!--crypto_default_null_skcipher_refcnt) { - crypto_free_sync_skcipher(crypto_default_null_skcipher); + tfm = crypto_default_null_skcipher; crypto_default_null_skcipher = NULL; } - mutex_unlock(&crypto_default_null_skcipher_lock); + spin_unlock_bh(&crypto_default_null_skcipher_lock); + + crypto_free_sync_skcipher(tfm); } EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); @@ -189,7 +181,7 @@ static int __init crypto_null_mod_init(void) { int ret = 0; - ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs)); + ret = crypto_register_alg(&cipher_null); if (ret < 0) goto out; @@ -206,14 +198,14 @@ static int __init crypto_null_mod_init(void) out_unregister_shash: crypto_unregister_shash(&digest_null); out_unregister_algs: - crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); + crypto_unregister_alg(&cipher_null); out: return ret; } static void __exit crypto_null_mod_fini(void) { - crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs)); + crypto_unregister_alg(&cipher_null); crypto_unregister_shash(&digest_null); crypto_unregister_skcipher(&skcipher_null); } diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 6c571834e86a..aad429bef03e 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -84,17 +84,6 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) sizeof(rcipher), &rcipher); } -static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) -{ - struct crypto_report_comp rcomp; - - memset(&rcomp, 0, sizeof(rcomp)); - - strscpy(rcomp.type, "compression", sizeof(rcomp.type)); - - return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp); -} - static int crypto_report_one(struct crypto_alg *alg, struct crypto_user_alg *ualg, struct sk_buff *skb) { @@ -136,11 +125,6 @@ static int crypto_report_one(struct crypto_alg *alg, goto nla_put_failure; break; - case CRYPTO_ALG_TYPE_COMPRESS: - if (crypto_report_comp(skb, alg)) - goto nla_put_failure; - - break; } out: diff --git a/crypto/ctr.c b/crypto/ctr.c index 73c0d6e53b2f..97a947b0a876 100644 --- a/crypto/ctr.c +++ b/crypto/ctr.c @@ -33,7 +33,7 @@ static void crypto_ctr_crypt_final(struct skcipher_walk *walk, u8 *ctrblk = walk->iv; u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -50,7 +50,7 @@ static int crypto_ctr_crypt_segment(struct skcipher_walk *walk, crypto_cipher_alg(tfm)->cia_encrypt; unsigned int bsize = crypto_cipher_blocksize(tfm); u8 *ctrblk = walk->iv; - u8 *src = walk->src.virt.addr; + const u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; @@ -77,20 +77,20 @@ static int crypto_ctr_crypt_inplace(struct skcipher_walk *walk, unsigned int bsize = crypto_cipher_blocksize(tfm); unsigned long alignmask = crypto_cipher_alignmask(tfm); 
unsigned int nbytes = walk->nbytes; + u8 *dst = walk->dst.virt.addr; u8 *ctrblk = walk->iv; - u8 *src = walk->src.virt.addr; u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); do { /* create keystream */ fn(crypto_cipher_tfm(tfm), keystream, ctrblk); - crypto_xor(src, keystream, bsize); + crypto_xor(dst, keystream, bsize); /* increment counter in counterblock */ crypto_inc(ctrblk, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; diff --git a/crypto/deflate.c b/crypto/deflate.c index 98e8bcb81a6a..5c346c544093 100644 --- a/crypto/deflate.c +++ b/crypto/deflate.c @@ -112,7 +112,7 @@ out: return ret; } -static void *deflate_alloc_ctx(struct crypto_scomp *tfm) +static void *deflate_alloc_ctx(void) { struct deflate_ctx *ctx; int ret; @@ -130,32 +130,18 @@ static void *deflate_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int deflate_init(struct crypto_tfm *tfm) -{ - struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); - - return __deflate_init(ctx); -} - static void __deflate_exit(void *ctx) { deflate_comp_exit(ctx); deflate_decomp_exit(ctx); } -static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void deflate_free_ctx(void *ctx) { __deflate_exit(ctx); kfree_sensitive(ctx); } -static void deflate_exit(struct crypto_tfm *tfm) -{ - struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); - - __deflate_exit(ctx); -} - static int __deflate_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -185,14 +171,6 @@ out: return ret; } -static int deflate_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); - - return __deflate_compress(src, slen, dst, dlen, dctx); -} - static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -241,14 +219,6 @@ out: return ret; } -static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct deflate_ctx *dctx = crypto_tfm_ctx(tfm); - - return __deflate_decompress(src, slen, dst, dlen, dctx); -} - static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -256,19 +226,6 @@ static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __deflate_decompress(src, slen, dst, dlen, ctx); } -static struct crypto_alg alg = { - .cra_name = "deflate", - .cra_driver_name = "deflate-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct deflate_ctx), - .cra_module = THIS_MODULE, - .cra_init = deflate_init, - .cra_exit = deflate_exit, - .cra_u = { .compress = { - .coa_compress = deflate_compress, - .coa_decompress = deflate_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = deflate_alloc_ctx, .free_ctx = deflate_free_ctx, @@ -283,24 +240,11 @@ static struct scomp_alg scomp = { static int __init deflate_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit deflate_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } diff --git a/crypto/ecc.c b/crypto/ecc.c index 50ad2d4ed672..6cf9a945fc6c 100644 --- a/crypto/ecc.c +++ 
b/crypto/ecc.c @@ -71,7 +71,7 @@ EXPORT_SYMBOL(ecc_get_curve); void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes, u64 *out, unsigned int ndigits) { - int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64)); + int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64)); unsigned int o = nbytes & 7; __be64 msd = 0; diff --git a/crypto/ecdsa-p1363.c b/crypto/ecdsa-p1363.c index eaae7214d69b..4454f1f8f33f 100644 --- a/crypto/ecdsa-p1363.c +++ b/crypto/ecdsa-p1363.c @@ -22,7 +22,7 @@ static int ecdsa_p1363_verify(struct crypto_sig *tfm, { struct ecdsa_p1363_ctx *ctx = crypto_sig_ctx(tfm); unsigned int keylen = crypto_sig_keysize(ctx->child); - unsigned int ndigits = DIV_ROUND_UP(keylen, sizeof(u64)); + unsigned int ndigits = DIV_ROUND_UP_POW2(keylen, sizeof(u64)); struct ecdsa_raw_sig sig; if (slen != 2 * keylen) diff --git a/crypto/ecdsa-x962.c b/crypto/ecdsa-x962.c index 6a77c13e192b..90a04f4b9a2f 100644 --- a/crypto/ecdsa-x962.c +++ b/crypto/ecdsa-x962.c @@ -81,8 +81,8 @@ static int ecdsa_x962_verify(struct crypto_sig *tfm, struct ecdsa_x962_signature_ctx sig_ctx; int err; - sig_ctx.ndigits = DIV_ROUND_UP(crypto_sig_keysize(ctx->child), - sizeof(u64)); + sig_ctx.ndigits = DIV_ROUND_UP_POW2(crypto_sig_keysize(ctx->child), + sizeof(u64)); err = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx, src, slen); if (err < 0) diff --git a/crypto/essiv.c b/crypto/essiv.c index 1c00c3324058..ec0ec8992c2d 100644 --- a/crypto/essiv.c +++ b/crypto/essiv.c @@ -405,8 +405,7 @@ static bool parse_cipher_name(char *essiv_cipher_name, const char *cra_name) if (len >= CRYPTO_MAX_ALG_NAME) return false; - memcpy(essiv_cipher_name, p, len); - essiv_cipher_name[len] = '\0'; + strscpy(essiv_cipher_name, p, len + 1); return true; } diff --git a/crypto/internal.h b/crypto/internal.h index 46b661be0f90..11567ea24fc3 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -33,6 +33,21 @@ struct crypto_larval { bool test_started; }; +struct crypto_type { + unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); + unsigned int (*extsize)(struct crypto_alg *alg); + int (*init_tfm)(struct crypto_tfm *tfm); + void (*show)(struct seq_file *m, struct crypto_alg *alg); + int (*report)(struct sk_buff *skb, struct crypto_alg *alg); + void (*free)(struct crypto_instance *inst); + void (*destroy)(struct crypto_alg *alg); + + unsigned int type; + unsigned int maskclear; + unsigned int maskset; + unsigned int tfmsize; +}; + enum { CRYPTOA_UNSPEC, CRYPTOA_ALG, @@ -113,6 +128,7 @@ void *crypto_create_tfm_node(struct crypto_alg *alg, const struct crypto_type *frontend, int node); void *crypto_clone_tfm(const struct crypto_type *frontend, struct crypto_tfm *otfm); +void crypto_destroy_alg(struct crypto_alg *alg); static inline void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) @@ -149,8 +165,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) static inline void crypto_alg_put(struct crypto_alg *alg) { - if (refcount_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy) - alg->cra_destroy(alg); + if (refcount_dec_and_test(&alg->cra_refcnt)) + crypto_destroy_alg(alg); } static inline int crypto_tmpl_get(struct crypto_template *tmpl) diff --git a/crypto/krb5/Kconfig b/crypto/krb5/Kconfig new file mode 100644 index 000000000000..4d0476e13f3c --- /dev/null +++ b/crypto/krb5/Kconfig @@ -0,0 +1,26 @@ +config CRYPTO_KRB5 + tristate "Kerberos 5 crypto" + select CRYPTO_MANAGER + select CRYPTO_KRB5ENC + select CRYPTO_AUTHENC + select CRYPTO_SKCIPHER + select 
CRYPTO_HASH_INFO + select CRYPTO_HMAC + select CRYPTO_CMAC + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + select CRYPTO_CBC + select CRYPTO_CTS + select CRYPTO_AES + select CRYPTO_CAMELLIA + help + Provide a library of Kerberos-5-based crypto routines. This is + intended for network filesystems to use. + +config CRYPTO_KRB5_SELFTESTS + bool "Kerberos 5 crypto selftests" + depends on CRYPTO_KRB5 + help + Turn on some self-testing for the Kerberos 5 crypto functions. These + will be performed on module load or boot, if compiled in. diff --git a/crypto/krb5/Makefile b/crypto/krb5/Makefile new file mode 100644 index 000000000000..d38890c0b247 --- /dev/null +++ b/crypto/krb5/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Kerberos 5 crypto +# + +krb5-y += \ + krb5_kdf.o \ + krb5_api.o \ + rfc3961_simplified.o \ + rfc3962_aes.o \ + rfc6803_camellia.o \ + rfc8009_aes2.o + +krb5-$(CONFIG_CRYPTO_KRB5_SELFTESTS) += \ + selftest.o \ + selftest_data.o + +obj-$(CONFIG_CRYPTO_KRB5) += krb5.o diff --git a/crypto/krb5/internal.h b/crypto/krb5/internal.h new file mode 100644 index 000000000000..a59084ffafe8 --- /dev/null +++ b/crypto/krb5/internal.h @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Kerberos5 crypto internals + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/scatterlist.h> +#include <crypto/krb5.h> +#include <crypto/hash.h> +#include <crypto/skcipher.h> + +/* + * Profile used for key derivation and encryption. + */ +struct krb5_crypto_profile { + /* Pseudo-random function */ + int (*calc_PRF)(const struct krb5_enctype *krb5, + const struct krb5_buffer *protocol_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp); + + /* Checksum key derivation */ + int (*calc_Kc)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *Kc, + gfp_t gfp); + + /* Encryption key derivation */ + int (*calc_Ke)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *Ke, + gfp_t gfp); + + /* Integrity key derivation */ + int (*calc_Ki)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *Ki, + gfp_t gfp); + + /* Derive the keys needed for an encryption AEAD object. */ + int (*derive_encrypt_keys)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Directly load the keys needed for an encryption AEAD object. */ + int (*load_encrypt_keys)(const struct krb5_enctype *krb5, + const struct krb5_buffer *Ke, + const struct krb5_buffer *Ki, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Derive the key needed for a checksum hash object. */ + int (*derive_checksum_key)(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Directly load the keys needed for a checksum hash object. */ + int (*load_checksum_key)(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + struct krb5_buffer *setkey, + gfp_t gfp); + + /* Encrypt data in-place, inserting confounder and checksum.
*/ + ssize_t (*encrypt)(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); + + /* Decrypt data in-place, removing confounder and checksum */ + int (*decrypt)(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + + /* Generate a MIC on part of a packet, inserting the checksum */ + ssize_t (*get_mic)(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len); + + /* Verify the MIC on a piece of data, removing the checksum */ + int (*verify_mic)(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +}; + +/* + * Crypto size/alignment rounding convenience macros. + */ +#define crypto_roundup(X) ((unsigned int)round_up((X), CRYPTO_MINALIGN)) + +#define krb5_aead_size(TFM) \ + crypto_roundup(sizeof(struct aead_request) + crypto_aead_reqsize(TFM)) +#define krb5_aead_ivsize(TFM) \ + crypto_roundup(crypto_aead_ivsize(TFM)) +#define krb5_shash_size(TFM) \ + crypto_roundup(sizeof(struct shash_desc) + crypto_shash_descsize(TFM)) +#define krb5_digest_size(TFM) \ + crypto_roundup(crypto_shash_digestsize(TFM)) +#define round16(x) (((x) + 15) & ~15) + +/* + * Self-testing data. + */ +struct krb5_prf_test { + u32 etype; + const char *name, *key, *octet, *prf; +}; + +struct krb5_key_test_one { + u32 use; + const char *key; +}; + +struct krb5_key_test { + u32 etype; + const char *name, *key; + struct krb5_key_test_one Kc, Ke, Ki; +}; + +struct krb5_enc_test { + u32 etype; + u32 usage; + const char *name, *plain, *conf, *K0, *Ke, *Ki, *ct; +}; + +struct krb5_mic_test { + u32 etype; + u32 usage; + const char *name, *plain, *K0, *Kc, *mic; +}; + +/* + * krb5_api.c + */ +struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *keys, + gfp_t gfp); +struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + gfp_t gfp); + +/* + * krb5_kdf.c + */ +int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); +int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); +int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp); + +/* + * rfc3961_simplified.c + */ +extern const struct krb5_crypto_profile rfc3961_simplified_profile; + +int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg, + size_t offset, size_t len); +int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); +int authenc_load_encrypt_keys(const struct krb5_enctype *krb5, + const struct krb5_buffer *Ke, + const struct krb5_buffer *Ki, + struct krb5_buffer *setkey, + gfp_t gfp); +int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp); +int rfc3961_load_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + 
struct krb5_buffer *setkey, + gfp_t gfp); +ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); +int krb5_aead_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len); +int rfc3961_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + +/* + * rfc3962_aes.c + */ +extern const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96; +extern const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96; + +/* + * rfc6803_camellia.c + */ +extern const struct krb5_enctype krb5_camellia128_cts_cmac; +extern const struct krb5_enctype krb5_camellia256_cts_cmac; + +/* + * rfc8009_aes2.c + */ +extern const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128; +extern const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192; + +/* + * selftest.c + */ +#ifdef CONFIG_CRYPTO_KRB5_SELFTESTS +int krb5_selftest(void); +#else +static inline int krb5_selftest(void) { return 0; } +#endif + +/* + * selftest_data.c + */ +extern const struct krb5_prf_test krb5_prf_tests[]; +extern const struct krb5_key_test krb5_key_tests[]; +extern const struct krb5_enc_test krb5_enc_tests[]; +extern const struct krb5_mic_test krb5_mic_tests[]; diff --git a/crypto/krb5/krb5_api.c b/crypto/krb5/krb5_api.c new file mode 100644 index 000000000000..23026d4206c8 --- /dev/null +++ b/crypto/krb5/krb5_api.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Kerberos 5 crypto library. + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include "internal.h" + +MODULE_DESCRIPTION("Kerberos 5 crypto"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); + +static const struct krb5_enctype *const krb5_supported_enctypes[] = { + &krb5_aes128_cts_hmac_sha1_96, + &krb5_aes256_cts_hmac_sha1_96, + &krb5_aes128_cts_hmac_sha256_128, + &krb5_aes256_cts_hmac_sha384_192, + &krb5_camellia128_cts_cmac, + &krb5_camellia256_cts_cmac, +}; + +/** + * crypto_krb5_find_enctype - Find the handler for a Kerberos5 encryption type + * @enctype: The standard Kerberos encryption type number + * + * Look up a Kerberos encryption type by number. If successful, returns a + * pointer to the type tables; returns NULL otherwise. + */ +const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype) +{ + const struct krb5_enctype *krb5; + size_t i; + + for (i = 0; i < ARRAY_SIZE(krb5_supported_enctypes); i++) { + krb5 = krb5_supported_enctypes[i]; + if (krb5->etype == enctype) + return krb5; + } + + return NULL; +} +EXPORT_SYMBOL(crypto_krb5_find_enctype); + +/** + * crypto_krb5_how_much_buffer - Work out how much buffer is required for an amount of data + * @krb5: The encoding to use. 
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @data_size: How much data we want to allow for
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much buffer space is required to wrap a given amount of data.
+ * This allows for a confounder, padding and checksum as appropriate.  The
+ * amount of buffer required is returned and the offset into the buffer at
+ * which the data will start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5,
+				   enum krb5_crypto_mode mode,
+				   size_t data_size, size_t *_offset)
+{
+	switch (mode) {
+	case KRB5_CHECKSUM_MODE:
+		*_offset = krb5->cksum_len;
+		return krb5->cksum_len + data_size;
+
+	case KRB5_ENCRYPT_MODE:
+		*_offset = krb5->conf_len;
+		return krb5->conf_len + data_size + krb5->cksum_len;
+
+	default:
+		WARN_ON(1);
+		*_offset = 0;
+		return 0;
+	}
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_buffer);
+
+/**
+ * crypto_krb5_how_much_data - Work out how much data can fit in an amount of buffer
+ * @krb5: The encoding to use.
+ * @mode: The mode in which to operate (checksum/encrypt)
+ * @_buffer_size: How much buffer we want to allow for (may be reduced)
+ * @_offset: Where to place the offset into the buffer
+ *
+ * Calculate how much data can be fitted into a given amount of buffer.  This
+ * allows for a confounder, padding and checksum as appropriate.  The amount of
+ * data that will fit is returned, the amount of buffer required is shrunk to
+ * allow for alignment, and the offset into the buffer at which the data will
+ * start is placed in *_offset.
+ */
+size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5,
+				 enum krb5_crypto_mode mode,
+				 size_t *_buffer_size, size_t *_offset)
+{
+	size_t buffer_size = *_buffer_size, data_size;
+
+	switch (mode) {
+	case KRB5_CHECKSUM_MODE:
+		if (WARN_ON(buffer_size < krb5->cksum_len + 1))
+			goto bad;
+		*_offset = krb5->cksum_len;
+		return buffer_size - krb5->cksum_len;
+
+	case KRB5_ENCRYPT_MODE:
+		if (WARN_ON(buffer_size < krb5->conf_len + 1 + krb5->cksum_len))
+			goto bad;
+		data_size = buffer_size - krb5->cksum_len;
+		*_offset = krb5->conf_len;
+		return data_size - krb5->conf_len;
+
+	default:
+		WARN_ON(1);
+		goto bad;
+	}
+
+bad:
+	*_offset = 0;
+	return 0;
+}
+EXPORT_SYMBOL(crypto_krb5_how_much_data);
+
+/**
+ * crypto_krb5_where_is_the_data - Find the data in a decrypted message
+ * @krb5: The encoding to use.
+ * @mode: Mode of operation
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Find the offset and size of the data in a secure message so that this
+ * information can be used in the metadata buffer which will get added to the
+ * digest by crypto_krb5_verify_mic().
+ */
+void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5,
+				   enum krb5_crypto_mode mode,
+				   size_t *_offset, size_t *_len)
+{
+	switch (mode) {
+	case KRB5_CHECKSUM_MODE:
+		*_offset += krb5->cksum_len;
+		*_len -= krb5->cksum_len;
+		return;
+	case KRB5_ENCRYPT_MODE:
+		*_offset += krb5->conf_len;
+		*_len -= krb5->conf_len + krb5->cksum_len;
+		return;
+	default:
+		WARN_ON_ONCE(1);
+		return;
+	}
+}
+EXPORT_SYMBOL(crypto_krb5_where_is_the_data);
+
+/*
+ * Prepare the encryption with derived key data.
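+ *
+ * A minimal usage sketch (added for illustration; TK and usage stand in for
+ * values the caller already has, and error handling is elided):
+ *
+ *	struct krb5_buffer keys = {};
+ *	struct crypto_aead *aead;
+ *
+ *	krb5->profile->derive_encrypt_keys(krb5, &TK, usage, &keys, GFP_KERNEL);
+ *	aead = krb5_prepare_encryption(krb5, &keys, GFP_KERNEL);
+ *	kfree(keys.data);
+ *
+ * This is the sequence that crypto_krb5_prepare_encryption() below wraps.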
+ */ +struct crypto_aead *krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *keys, + gfp_t gfp) +{ + struct crypto_aead *ci = NULL; + int ret = -ENOMEM; + + ci = crypto_alloc_aead(krb5->encrypt_name, 0, 0); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + if (ret == -ENOENT) + ret = -ENOPKG; + goto err; + } + + ret = crypto_aead_setkey(ci, keys->data, keys->len); + if (ret < 0) { + pr_err("Couldn't set AEAD key %s: %d\n", krb5->encrypt_name, ret); + goto err_ci; + } + + ret = crypto_aead_setauthsize(ci, krb5->cksum_len); + if (ret < 0) { + pr_err("Couldn't set AEAD authsize %s: %d\n", krb5->encrypt_name, ret); + goto err_ci; + } + + return ci; +err_ci: + crypto_free_aead(ci); +err: + return ERR_PTR(ret); +} + +/** + * crypto_krb5_prepare_encryption - Prepare AEAD crypto object for encryption-mode + * @krb5: The encoding to use. + * @TK: The transport key to use. + * @usage: The usage constant for key derivation. + * @gfp: Allocation flags. + * + * Allocate a crypto object that does all the necessary crypto, key it and set + * its parameters and return the crypto handle to it. This can then be used to + * dispatch encrypt and decrypt operations. + */ +struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp) +{ + struct crypto_aead *ci = NULL; + struct krb5_buffer keys = {}; + int ret; + + ret = krb5->profile->derive_encrypt_keys(krb5, TK, usage, &keys, gfp); + if (ret < 0) + goto err; + + ci = krb5_prepare_encryption(krb5, &keys, gfp); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + goto err; + } + + kfree(keys.data); + return ci; +err: + kfree(keys.data); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(crypto_krb5_prepare_encryption); + +/* + * Prepare the checksum with derived key data. + */ +struct crypto_shash *krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + gfp_t gfp) +{ + struct crypto_shash *ci = NULL; + int ret = -ENOMEM; + + ci = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + if (ret == -ENOENT) + ret = -ENOPKG; + goto err; + } + + ret = crypto_shash_setkey(ci, Kc->data, Kc->len); + if (ret < 0) { + pr_err("Couldn't set shash key %s: %d\n", krb5->cksum_name, ret); + goto err_ci; + } + + return ci; +err_ci: + crypto_free_shash(ci); +err: + return ERR_PTR(ret); +} + +/** + * crypto_krb5_prepare_checksum - Prepare AEAD crypto object for checksum-mode + * @krb5: The encoding to use. + * @TK: The transport key to use. + * @usage: The usage constant for key derivation. + * @gfp: Allocation flags. + * + * Allocate a crypto object that does all the necessary crypto, key it and set + * its parameters and return the crypto handle to it. This can then be used to + * dispatch get_mic and verify_mic operations. + */ +struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp) +{ + struct crypto_shash *ci = NULL; + struct krb5_buffer keys = {}; + int ret; + + ret = krb5->profile->derive_checksum_key(krb5, TK, usage, &keys, gfp); + if (ret < 0) { + pr_err("get_Kc failed %d\n", ret); + goto err; + } + + ci = krb5_prepare_checksum(krb5, &keys, gfp); + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + goto err; + } + + kfree(keys.data); + return ci; +err: + kfree(keys.data); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(crypto_krb5_prepare_checksum); + +/** + * crypto_krb5_encrypt - Apply Kerberos encryption and integrity. + * @krb5: The encoding to use. 
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ * @preconfounded: True if the confounder is already inserted.
+ *
+ * Using the specified Kerberos encoding, insert a confounder and padding as
+ * needed, encrypt this and the data in place and insert an integrity checksum
+ * into the buffer.
+ *
+ * The buffer must include space for the confounder, the checksum and any
+ * padding required.  The caller can preinsert the confounder into the buffer
+ * (for testing, for example).
+ *
+ * The resulting secured blob may be less than the size of the buffer.
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the confounder
+ * is too short or the data is misaligned.  Other errors may also be returned
+ * from the crypto layer.
+ */
+ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5,
+			    struct crypto_aead *aead,
+			    struct scatterlist *sg, unsigned int nr_sg,
+			    size_t sg_len,
+			    size_t data_offset, size_t data_len,
+			    bool preconfounded)
+{
+	if (WARN_ON(data_offset > sg_len ||
+		    data_len > sg_len ||
+		    data_offset > sg_len - data_len))
+		return -EMSGSIZE;
+	return krb5->profile->encrypt(krb5, aead, sg, nr_sg, sg_len,
+				      data_offset, data_len, preconfounded);
+}
+EXPORT_SYMBOL(crypto_krb5_encrypt);
+
+/**
+ * crypto_krb5_decrypt - Validate and remove Kerberos encryption and integrity.
+ * @krb5: The encoding to use.
+ * @aead: The keyed crypto object to use.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum and decrypt the secure region, stripping off the confounder.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data plus the trailing padding are stored.  The caller is responsible
+ * for working out how much padding there is and removing it.
+ *
+ * Returns 0 if successful or -ENOMEM on an allocation failure; returns
+ * -EPROTO if the data cannot be parsed or -EBADMSG if the integrity checksum
+ * doesn't match.  Other errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_decrypt(const struct krb5_enctype *krb5,
+			struct crypto_aead *aead,
+			struct scatterlist *sg, unsigned int nr_sg,
+			size_t *_offset, size_t *_len)
+{
+	return krb5->profile->decrypt(krb5, aead, sg, nr_sg, _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_decrypt);
+
+/**
+ * crypto_krb5_get_mic - Apply Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @sg_len: The size of the buffer.
+ * @data_offset: The offset of the data in the @sg buffer.
+ * @data_len: The length of the data.
+ *
+ * Using the specified Kerberos encoding, calculate and insert an integrity
+ * checksum into the buffer.
+ *
+ * The buffer must include space for the checksum at the front.
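+ *
+ * A sizing sketch (illustrative only; the variable names are not part of the
+ * patch and error handling is elided)::
+ *
+ *	size_t offset, total;
+ *
+ *	total = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE,
+ *					    data_len, &offset);
+ *	// copy the payload to byte 'offset' of a 'total'-byte buffer,
+ *	// map that buffer into 'sg', then:
+ *	crypto_krb5_get_mic(krb5, shash, NULL, sg, nr_sg, total,
+ *			    offset, data_len);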
+ *
+ * Returns the size of the secure blob if successful, -ENOMEM on an allocation
+ * failure, -EFAULT if there is insufficient space, -EMSGSIZE if the gap for
+ * the checksum is too short.  Other errors may also be returned from the
+ * crypto layer.
+ */
+ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5,
+			    struct crypto_shash *shash,
+			    const struct krb5_buffer *metadata,
+			    struct scatterlist *sg, unsigned int nr_sg,
+			    size_t sg_len,
+			    size_t data_offset, size_t data_len)
+{
+	if (WARN_ON(data_offset > sg_len ||
+		    data_len > sg_len ||
+		    data_offset > sg_len - data_len))
+		return -EMSGSIZE;
+	return krb5->profile->get_mic(krb5, shash, metadata, sg, nr_sg, sg_len,
+				      data_offset, data_len);
+}
+EXPORT_SYMBOL(crypto_krb5_get_mic);
+
+/**
+ * crypto_krb5_verify_mic - Validate and remove Kerberos integrity checksum.
+ * @krb5: The encoding to use.
+ * @shash: The keyed hash to use.
+ * @metadata: Metadata to add into the hash before adding the data.
+ * @sg: Scatterlist defining the crypto buffer.
+ * @nr_sg: The number of elements in @sg.
+ * @_offset: Offset of the secure blob in the buffer; updated to data offset.
+ * @_len: The length of the secure blob; updated to data length.
+ *
+ * Using the specified Kerberos encoding, check and remove the integrity
+ * checksum.
+ *
+ * If successful, @_offset and @_len are updated to outline the region in which
+ * the data is stored.
+ *
+ * Returns 0 if successful or -ENOMEM on an allocation failure; returns
+ * -EPROTO if the data cannot be parsed or -EBADMSG if the checksum doesn't
+ * match.  Other errors may also be returned from the crypto layer.
+ */
+int crypto_krb5_verify_mic(const struct krb5_enctype *krb5,
+			   struct crypto_shash *shash,
+			   const struct krb5_buffer *metadata,
+			   struct scatterlist *sg, unsigned int nr_sg,
+			   size_t *_offset, size_t *_len)
+{
+	return krb5->profile->verify_mic(krb5, shash, metadata, sg, nr_sg,
+					 _offset, _len);
+}
+EXPORT_SYMBOL(crypto_krb5_verify_mic);
+
+static int __init crypto_krb5_init(void)
+{
+	return krb5_selftest();
+}
+module_init(crypto_krb5_init);
+
+static void __exit crypto_krb5_exit(void)
+{
+}
+module_exit(crypto_krb5_exit);
diff --git a/crypto/krb5/krb5_kdf.c b/crypto/krb5/krb5_kdf.c
new file mode 100644
index 000000000000..6699e5469d1b
--- /dev/null
+++ b/crypto/krb5/krb5_kdf.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Kerberos key derivation.
+ *
+ * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include "internal.h"
+
+/**
+ * crypto_krb5_calc_PRFplus - Calculate PRF+ [RFC4402]
+ * @krb5: The encryption type to use
+ * @K: The protocol key for the pseudo-random function
+ * @L: The length of the output
+ * @S: The input octet string
+ * @result: Result buffer, sized to krb5->prf_len
+ * @gfp: Allocation restrictions
+ *
+ * Calculate the Kerberos pseudo-random function, PRF+(), by the following
+ * method:
+ *
+ *	PRF+(K, L, S) = truncate(L, T1 || T2 || ..
|| Tn) + * Tn = PRF(K, n || S) + * [rfc4402 sec 2] + */ +int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5, + const struct krb5_buffer *K, + unsigned int L, + const struct krb5_buffer *S, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct krb5_buffer T_series, Tn, n_S; + void *buffer; + int ret, n = 1; + + Tn.len = krb5->prf_len; + T_series.len = 0; + n_S.len = 4 + S->len; + + buffer = kzalloc(round16(L + Tn.len) + round16(n_S.len), gfp); + if (!buffer) + return -ENOMEM; + + T_series.data = buffer; + n_S.data = buffer + round16(L + Tn.len); + memcpy(n_S.data + 4, S->data, S->len); + + while (T_series.len < L) { + *(__be32 *)(n_S.data) = htonl(n); + Tn.data = T_series.data + Tn.len * (n - 1); + ret = krb5->profile->calc_PRF(krb5, K, &n_S, &Tn, gfp); + if (ret < 0) + goto err; + T_series.len += Tn.len; + n++; + } + + /* Truncate to L */ + memcpy(result->data, T_series.data, L); + ret = 0; + +err: + kfree_sensitive(buffer); + return ret; +} +EXPORT_SYMBOL(crypto_krb5_calc_PRFplus); + +/** + * krb5_derive_Kc - Derive key Kc and install into a hash + * @krb5: The encryption type to use + * @TK: The base key + * @usage: The key usage number + * @key: Prepped buffer to store the key into + * @gfp: Allocation restrictions + * + * Derive the Kerberos Kc checksumming key. The key is stored into the + * prepared buffer. + */ +int krb5_derive_Kc(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp) +{ + u8 buf[5] __aligned(CRYPTO_MINALIGN); + struct krb5_buffer usage_constant = { .len = 5, .data = buf }; + + *(__be32 *)buf = cpu_to_be32(usage); + buf[4] = KEY_USAGE_SEED_CHECKSUM; + + key->len = krb5->Kc_len; + return krb5->profile->calc_Kc(krb5, TK, &usage_constant, key, gfp); +} + +/** + * krb5_derive_Ke - Derive key Ke and install into an skcipher + * @krb5: The encryption type to use + * @TK: The base key + * @usage: The key usage number + * @key: Prepped buffer to store the key into + * @gfp: Allocation restrictions + * + * Derive the Kerberos Ke encryption key. The key is stored into the prepared + * buffer. + */ +int krb5_derive_Ke(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp) +{ + u8 buf[5] __aligned(CRYPTO_MINALIGN); + struct krb5_buffer usage_constant = { .len = 5, .data = buf }; + + *(__be32 *)buf = cpu_to_be32(usage); + buf[4] = KEY_USAGE_SEED_ENCRYPTION; + + key->len = krb5->Ke_len; + return krb5->profile->calc_Ke(krb5, TK, &usage_constant, key, gfp); +} + +/** + * krb5_derive_Ki - Derive key Ki and install into a hash + * @krb5: The encryption type to use + * @TK: The base key + * @usage: The key usage number + * @key: Prepped buffer to store the key into + * @gfp: Allocation restrictions + * + * Derive the Kerberos Ki integrity checksum key. The key is stored into the + * prepared buffer. 
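+ *
+ * The five-octet usage constant handed to the profile's calc_Ki operation is
+ * the 32-bit big-endian usage number followed by the KEY_USAGE_SEED_INTEGRITY
+ * octet, as built below.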
+ */ +int krb5_derive_Ki(const struct krb5_enctype *krb5, const struct krb5_buffer *TK, + u32 usage, struct krb5_buffer *key, gfp_t gfp) +{ + u8 buf[5] __aligned(CRYPTO_MINALIGN); + struct krb5_buffer usage_constant = { .len = 5, .data = buf }; + + *(__be32 *)buf = cpu_to_be32(usage); + buf[4] = KEY_USAGE_SEED_INTEGRITY; + + key->len = krb5->Ki_len; + return krb5->profile->calc_Ki(krb5, TK, &usage_constant, key, gfp); +} diff --git a/crypto/krb5/rfc3961_simplified.c b/crypto/krb5/rfc3961_simplified.c new file mode 100644 index 000000000000..79180d28baa9 --- /dev/null +++ b/crypto/krb5/rfc3961_simplified.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* rfc3961 Kerberos 5 simplified crypto profile. + * + * Parts borrowed from net/sunrpc/auth_gss/. + */ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. + * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. + * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/random.h> +#include <linux/scatterlist.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/lcm.h> +#include <linux/rtnetlink.h> +#include <crypto/authenc.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include "internal.h" + +/* Maximum blocksize for the supported crypto algorithms */ +#define KRB5_MAX_BLOCKSIZE (16) + +int crypto_shash_update_sg(struct shash_desc *desc, struct scatterlist *sg, + size_t offset, size_t len) +{ + struct sg_mapping_iter miter; + size_t i, n; + int ret = 0; + + sg_miter_start(&miter, sg, sg_nents(sg), + SG_MITER_FROM_SG | SG_MITER_LOCAL); + for (i = 0; i < len; i += n) { + sg_miter_next(&miter); + n = min(miter.length, len - i); + ret = crypto_shash_update(desc, miter.addr, n); + if (ret < 0) + break; + } + sg_miter_stop(&miter); + return ret; +} + +static int rfc3961_do_encrypt(struct crypto_sync_skcipher *tfm, void *iv, + const struct krb5_buffer *in, struct krb5_buffer *out) +{ + struct scatterlist sg[1]; + u8 local_iv[KRB5_MAX_BLOCKSIZE] __aligned(KRB5_MAX_BLOCKSIZE) = {0}; + SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); + int ret; + + if (WARN_ON(in->len != out->len)) + return -EINVAL; + if (out->len % crypto_sync_skcipher_blocksize(tfm) != 0) + return -EINVAL; + + if (crypto_sync_skcipher_ivsize(tfm) > KRB5_MAX_BLOCKSIZE) + return -EINVAL; + + if (iv) + memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm)); + + memcpy(out->data, in->data, out->len); + sg_init_one(sg, out->data, out->len); + + skcipher_request_set_sync_tfm(req, tfm); + skcipher_request_set_callback(req, 0, NULL, NULL); + skcipher_request_set_crypt(req, sg, sg, out->len, local_iv); + + ret = crypto_skcipher_encrypt(req); + skcipher_request_zero(req); + return ret; +} + +/* + * Calculate an unkeyed basic hash. + */ +static int rfc3961_calc_H(const struct krb5_enctype *krb5, + const struct krb5_buffer *data, + struct krb5_buffer *digest, + gfp_t gfp) +{ + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t desc_size; + int ret = -ENOMEM; + + tfm = crypto_alloc_shash(krb5->hash_name, 0, 0); + if (IS_ERR(tfm)) + return (PTR_ERR(tfm) == -ENOENT) ? -ENOPKG : PTR_ERR(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + + desc = kzalloc(desc_size, gfp); + if (!desc) + goto error_tfm; + + digest->len = crypto_shash_digestsize(tfm); + digest->data = kzalloc(digest->len, gfp); + if (!digest->data) + goto error_desc; + + desc->tfm = tfm; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error_digest; + + ret = crypto_shash_finup(desc, data->data, data->len, digest->data); + if (ret < 0) + goto error_digest; + + goto error_desc; + +error_digest: + kfree_sensitive(digest->data); +error_desc: + kfree_sensitive(desc); +error_tfm: + crypto_free_shash(tfm); + return ret; +} + +/* + * This is the n-fold function as described in rfc3961, sec 5.1 + * Taken from MIT Kerberos and modified. 
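+ *
+ * In brief: the input is repeated lcm(inlen, outlen) / inlen times, each
+ * repetition being rotated right 13 bits further than the previous one, and
+ * the repetitions are summed with ones'-complement addition to produce the
+ * output.  For example, rfc3961 appendix A.1 gives 64-fold("kerberos") as
+ * 6b65726265726f73.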
+ */ +static void rfc3961_nfold(const struct krb5_buffer *source, struct krb5_buffer *result) +{ + const u8 *in = source->data; + u8 *out = result->data; + unsigned long ulcm; + unsigned int inbits, outbits; + int byte, i, msbit; + + /* the code below is more readable if I make these bytes instead of bits */ + inbits = source->len; + outbits = result->len; + + /* first compute lcm(n,k) */ + ulcm = lcm(inbits, outbits); + + /* now do the real work */ + memset(out, 0, outbits); + byte = 0; + + /* this will end up cycling through k lcm(k,n)/k times, which + * is correct. + */ + for (i = ulcm-1; i >= 0; i--) { + /* compute the msbit in k which gets added into this byte */ + msbit = ( + /* first, start with the msbit in the first, + * unrotated byte + */ + ((inbits << 3) - 1) + + /* then, for each byte, shift to the right + * for each repetition + */ + (((inbits << 3) + 13) * (i/inbits)) + + /* last, pick out the correct byte within + * that shifted repetition + */ + ((inbits - (i % inbits)) << 3) + ) % (inbits << 3); + + /* pull out the byte value itself */ + byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8) | + (in[((inbits) - (msbit >> 3)) % inbits])) + >> ((msbit & 7) + 1)) & 0xff; + + /* do the addition */ + byte += out[i % outbits]; + out[i % outbits] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + + /* if there's a carry bit left over, add it back in */ + if (byte) { + for (i = outbits - 1; i >= 0; i--) { + /* do the addition */ + byte += out[i]; + out[i] = byte & 0xff; + + /* keep around the carry bit, if any */ + byte >>= 8; + } + } +} + +/* + * Calculate a derived key, DK(Base Key, Well-Known Constant) + * + * DK(Key, Constant) = random-to-key(DR(Key, Constant)) + * DR(Key, Constant) = k-truncate(E(Key, Constant, initial-cipher-state)) + * K1 = E(Key, n-fold(Constant), initial-cipher-state) + * K2 = E(Key, K1, initial-cipher-state) + * K3 = E(Key, K2, initial-cipher-state) + * K4 = ... + * DR(Key, Constant) = k-truncate(K1 | K2 | K3 | K4 ...) + * [rfc3961 sec 5.1] + */ +static int rfc3961_calc_DK(const struct krb5_enctype *krb5, + const struct krb5_buffer *inkey, + const struct krb5_buffer *in_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + unsigned int blocksize, keybytes, keylength, n; + struct krb5_buffer inblock, outblock, rawkey; + struct crypto_sync_skcipher *cipher; + int ret = -EINVAL; + + blocksize = krb5->block_len; + keybytes = krb5->key_bytes; + keylength = krb5->key_len; + + if (inkey->len != keylength || result->len != keylength) + return -EINVAL; + if (!krb5->random_to_key && result->len != keybytes) + return -EINVAL; + + cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0); + if (IS_ERR(cipher)) { + ret = (PTR_ERR(cipher) == -ENOENT) ? 
-ENOPKG : PTR_ERR(cipher); + goto err_return; + } + ret = crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len); + if (ret < 0) + goto err_free_cipher; + + ret = -ENOMEM; + inblock.data = kzalloc(blocksize * 2 + keybytes, gfp); + if (!inblock.data) + goto err_free_cipher; + + inblock.len = blocksize; + outblock.data = inblock.data + blocksize; + outblock.len = blocksize; + rawkey.data = outblock.data + blocksize; + rawkey.len = keybytes; + + /* initialize the input block */ + + if (in_constant->len == inblock.len) + memcpy(inblock.data, in_constant->data, inblock.len); + else + rfc3961_nfold(in_constant, &inblock); + + /* loop encrypting the blocks until enough key bytes are generated */ + n = 0; + while (n < rawkey.len) { + rfc3961_do_encrypt(cipher, NULL, &inblock, &outblock); + + if (keybytes - n <= outblock.len) { + memcpy(rawkey.data + n, outblock.data, keybytes - n); + break; + } + + memcpy(rawkey.data + n, outblock.data, outblock.len); + memcpy(inblock.data, outblock.data, outblock.len); + n += outblock.len; + } + + /* postprocess the key */ + if (!krb5->random_to_key) { + /* Identity random-to-key function. */ + memcpy(result->data, rawkey.data, rawkey.len); + ret = 0; + } else { + ret = krb5->random_to_key(krb5, &rawkey, result); + } + + kfree_sensitive(inblock.data); +err_free_cipher: + crypto_free_sync_skcipher(cipher); +err_return: + return ret; +} + +/* + * Calculate single encryption, E() + * + * E(Key, octets) + */ +static int rfc3961_calc_E(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *in_data, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_sync_skcipher *cipher; + int ret; + + cipher = crypto_alloc_sync_skcipher(krb5->derivation_enc, 0, 0); + if (IS_ERR(cipher)) { + ret = (PTR_ERR(cipher) == -ENOENT) ? -ENOPKG : PTR_ERR(cipher); + goto err; + } + + ret = crypto_sync_skcipher_setkey(cipher, key->data, key->len); + if (ret < 0) + goto err_free; + + ret = rfc3961_do_encrypt(cipher, NULL, in_data, result); + +err_free: + crypto_free_sync_skcipher(cipher); +err: + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * tmp1 = H(octet-string) + * tmp2 = truncate tmp1 to multiple of m + * PRF = E(DK(protocol-key, prfconstant), tmp2, initial-cipher-state) + * + * The "prfconstant" used in the PRF operation is the three-octet string + * "prf". 
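+ *
+ * (Note: tmp2 is tmp1 truncated down to a multiple of the cipher block size
+ * m, as required by the simplified profile.)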
+ * [rfc3961 sec 5.3]
+ */
+static int rfc3961_calc_PRF(const struct krb5_enctype *krb5,
+			    const struct krb5_buffer *protocol_key,
+			    const struct krb5_buffer *octet_string,
+			    struct krb5_buffer *result,
+			    gfp_t gfp)
+{
+	static const struct krb5_buffer prfconstant = { 3, "prf" };
+	struct krb5_buffer derived_key;
+	struct krb5_buffer tmp1, tmp2;
+	unsigned int m = krb5->block_len;
+	void *buffer;
+	int ret;
+
+	if (result->len != krb5->prf_len)
+		return -EINVAL;
+
+	tmp1.len = krb5->hash_len;
+	derived_key.len = krb5->key_bytes;
+	buffer = kzalloc(round16(tmp1.len) + round16(derived_key.len), gfp);
+	if (!buffer)
+		return -ENOMEM;
+
+	tmp1.data = buffer;
+	derived_key.data = buffer + round16(tmp1.len);
+
+	ret = rfc3961_calc_H(krb5, octet_string, &tmp1, gfp);
+	if (ret < 0)
+		goto err;
+
+	tmp2.len = tmp1.len & ~(m - 1);
+	tmp2.data = tmp1.data;
+
+	ret = rfc3961_calc_DK(krb5, protocol_key, &prfconstant, &derived_key, gfp);
+	if (ret < 0)
+		goto err;
+
+	ret = rfc3961_calc_E(krb5, &derived_key, &tmp2, result, gfp);
+
+err:
+	kfree_sensitive(buffer);
+	return ret;
+}
+
+/*
+ * Derive the Ke and Ki keys and package them into a key parameter that can be
+ * given to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_derive_encrypt_keys(const struct krb5_enctype *krb5,
+				const struct krb5_buffer *TK,
+				unsigned int usage,
+				struct krb5_buffer *setkey,
+				gfp_t gfp)
+{
+	struct crypto_authenc_key_param *param;
+	struct krb5_buffer Ke, Ki;
+	struct rtattr *rta;
+	int ret;
+
+	Ke.len = krb5->Ke_len;
+	Ki.len = krb5->Ki_len;
+	setkey->len = RTA_LENGTH(sizeof(*param)) + Ke.len + Ki.len;
+	setkey->data = kzalloc(setkey->len, gfp);
+	if (!setkey->data)
+		return -ENOMEM;
+
+	rta = setkey->data;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	param->enckeylen = htonl(Ke.len);
+
+	Ki.data = (void *)(param + 1);
+	Ke.data = Ki.data + Ki.len;
+
+	ret = krb5_derive_Ke(krb5, TK, usage, &Ke, gfp);
+	if (ret < 0) {
+		pr_err("get_Ke failed %d\n", ret);
+		return ret;
+	}
+	ret = krb5_derive_Ki(krb5, TK, usage, &Ki, gfp);
+	if (ret < 0)
+		pr_err("get_Ki failed %d\n", ret);
+	return ret;
+}
+
+/*
+ * Package predefined Ke and Ki keys into a key parameter that can be given
+ * to the setkey of an authenc AEAD crypto object.
+ */
+int authenc_load_encrypt_keys(const struct krb5_enctype *krb5,
+			      const struct krb5_buffer *Ke,
+			      const struct krb5_buffer *Ki,
+			      struct krb5_buffer *setkey,
+			      gfp_t gfp)
+{
+	struct crypto_authenc_key_param *param;
+	struct rtattr *rta;
+
+	setkey->len = RTA_LENGTH(sizeof(*param)) + Ke->len + Ki->len;
+	setkey->data = kzalloc(setkey->len, gfp);
+	if (!setkey->data)
+		return -ENOMEM;
+
+	rta = setkey->data;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*param));
+	param = RTA_DATA(rta);
+	param->enckeylen = htonl(Ke->len);
+	memcpy((void *)(param + 1), Ki->data, Ki->len);
+	memcpy((void *)(param + 1) + Ki->len, Ke->data, Ke->len);
+	return 0;
+}
+
+/*
+ * Derive the Kc key for checksum-only mode and package it into a key parameter
+ * that can be given to the setkey of a hash crypto object.
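+ *
+ * Unlike the encryption keys above, no rtattr wrapping is needed: the setkey
+ * blob is just the raw Kc_len-octet key, as consumed by crypto_shash_setkey()
+ * in krb5_prepare_checksum().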
+ */ +int rfc3961_derive_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + unsigned int usage, + struct krb5_buffer *setkey, + gfp_t gfp) +{ + int ret; + + setkey->len = krb5->Kc_len; + setkey->data = kzalloc(setkey->len, GFP_KERNEL); + if (!setkey->data) + return -ENOMEM; + + ret = krb5_derive_Kc(krb5, TK, usage, setkey, gfp); + if (ret < 0) + pr_err("get_Kc failed %d\n", ret); + return ret; +} + +/* + * Package a predefined Kc key for checksum-only mode into a key parameter that + * can be given to the setkey of a hash crypto object. + */ +int rfc3961_load_checksum_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *Kc, + struct krb5_buffer *setkey, + gfp_t gfp) +{ + setkey->len = krb5->Kc_len; + setkey->data = kmemdup(Kc->data, Kc->len, GFP_KERNEL); + if (!setkey->data) + return -ENOMEM; + return 0; +} + +/* + * Apply encryption and checksumming functions to part of a scatterlist. + */ +ssize_t krb5_aead_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded) +{ + struct aead_request *req; + ssize_t ret, done; + size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset; + void *buffer; + u8 *iv; + + if (WARN_ON(data_offset != krb5->conf_len)) + return -EINVAL; /* Data is in wrong place */ + + secure_offset = 0; + base_len = krb5->conf_len + data_len; + pad_len = 0; + secure_len = base_len + pad_len; + cksum_offset = secure_len; + if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len)) + return -EFAULT; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Insert the confounder into the buffer */ + ret = -EFAULT; + if (!preconfounded) { + get_random_bytes(buffer, krb5->conf_len); + done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len, + secure_offset); + if (done != krb5->conf_len) + goto error; + } + + /* We may need to pad out to the crypto blocksize. */ + if (pad_len) { + done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len); + if (done != pad_len) + goto error; + } + + /* Hash and encrypt the message. */ + req = buffer; + iv = buffer + krb5_aead_size(aead); + + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_crypt(req, sg, sg, secure_len, iv); + ret = crypto_aead_encrypt(req); + if (ret < 0) + goto error; + + ret = secure_len + krb5->cksum_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Apply decryption and checksumming functions to a message. The offset and + * length are updated to reflect the actual content of the encrypted region. + */ +int krb5_aead_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + struct aead_request *req; + size_t bsize; + void *buffer; + int ret; + u8 *iv; + + if (WARN_ON(*_offset != 0)) + return -EINVAL; /* Can't set offset on aead */ + + if (*_len < krb5->conf_len + krb5->cksum_len) + return -EPROTO; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Decrypt the message and verify its checksum. 
*/ + req = buffer; + iv = buffer + krb5_aead_size(aead); + + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_crypt(req, sg, sg, *_len, iv); + ret = crypto_aead_decrypt(req); + if (ret < 0) + goto error; + + /* Adjust the boundaries of the data. */ + *_offset += krb5->conf_len; + *_len -= krb5->conf_len + krb5->cksum_len; + ret = 0; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Generate a checksum over some metadata and part of an skbuff and insert the + * MIC into the skbuff immediately prior to the data. + */ +ssize_t rfc3961_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len) +{ + struct shash_desc *desc; + ssize_t ret, done; + size_t bsize; + void *buffer, *digest; + + if (WARN_ON(data_offset != krb5->cksum_len)) + return -EMSGSIZE; + + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + /* Calculate the MIC with key Kc and store it into the skb */ + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + if (metadata) { + ret = crypto_shash_update(desc, metadata->data, metadata->len); + if (ret < 0) + goto error; + } + + ret = crypto_shash_update_sg(desc, sg, data_offset, data_len); + if (ret < 0) + goto error; + + digest = buffer + krb5_shash_size(shash); + ret = crypto_shash_final(desc, digest); + if (ret < 0) + goto error; + + ret = -EFAULT; + done = sg_pcopy_from_buffer(sg, nr_sg, digest, krb5->cksum_len, + data_offset - krb5->cksum_len); + if (done != krb5->cksum_len) + goto error; + + ret = krb5->cksum_len + data_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Check the MIC on a region of an skbuff. The offset and length are updated + * to reflect the actual content of the secure region. 
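+ *
+ * The secure region is laid out with the checksum first:
+ *
+ *	+---------------------+-------------------------------+
+ *	| cksum_len octet MIC | data                          |
+ *	+---------------------+-------------------------------+
+ *
+ * so on success *_offset is advanced past the MIC and *_len is shrunk to
+ * cover just the data.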
+ */ +int rfc3961_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + struct shash_desc *desc; + ssize_t done; + size_t bsize, data_offset, data_len, offset = *_offset, len = *_len; + void *buffer = NULL; + int ret; + u8 *cksum, *cksum2; + + if (len < krb5->cksum_len) + return -EPROTO; + data_offset = offset + krb5->cksum_len; + data_len = len - krb5->cksum_len; + + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) * 2; + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + cksum = buffer + + krb5_shash_size(shash); + cksum2 = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + + /* Calculate the MIC */ + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + if (metadata) { + ret = crypto_shash_update(desc, metadata->data, metadata->len); + if (ret < 0) + goto error; + } + + crypto_shash_update_sg(desc, sg, data_offset, data_len); + crypto_shash_final(desc, cksum); + + ret = -EFAULT; + done = sg_pcopy_to_buffer(sg, nr_sg, cksum2, krb5->cksum_len, offset); + if (done != krb5->cksum_len) + goto error; + + if (memcmp(cksum, cksum2, krb5->cksum_len) != 0) { + ret = -EBADMSG; + goto error; + } + + *_offset += krb5->cksum_len; + *_len -= krb5->cksum_len; + ret = 0; + +error: + kfree_sensitive(buffer); + return ret; +} + +const struct krb5_crypto_profile rfc3961_simplified_profile = { + .calc_PRF = rfc3961_calc_PRF, + .calc_Kc = rfc3961_calc_DK, + .calc_Ke = rfc3961_calc_DK, + .calc_Ki = rfc3961_calc_DK, + .derive_encrypt_keys = authenc_derive_encrypt_keys, + .load_encrypt_keys = authenc_load_encrypt_keys, + .derive_checksum_key = rfc3961_derive_checksum_key, + .load_checksum_key = rfc3961_load_checksum_key, + .encrypt = krb5_aead_encrypt, + .decrypt = krb5_aead_decrypt, + .get_mic = rfc3961_get_mic, + .verify_mic = rfc3961_verify_mic, +}; diff --git a/crypto/krb5/rfc3962_aes.c b/crypto/krb5/rfc3962_aes.c new file mode 100644 index 000000000000..5cbf8f4638b9 --- /dev/null +++ b/crypto/krb5/rfc3962_aes.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* rfc3962 Advanced Encryption Standard (AES) Encryption for Kerberos 5 + * + * Parts borrowed from net/sunrpc/auth_gss/. + */ +/* + * COPYRIGHT (c) 2008 + * The Regents of the University of Michigan + * ALL RIGHTS RESERVED + * + * Permission is granted to use, copy, create derivative works + * and redistribute this software and such derivative works + * for any purpose, so long as the name of The University of + * Michigan is not used in any advertising or publicity + * pertaining to the use of distribution of this software + * without specific, written prior authorization. If the + * above copyright notice or any other identification of the + * University of Michigan is included in any copy of any + * portion of this software, then the disclaimer below must + * also be included. + * + * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION + * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY + * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF + * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING + * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE + * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE + * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR + * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING + * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN + * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGES. + */ + +/* + * Copyright (C) 1998 by the FundsXpress, INC. + * + * All rights reserved. + * + * Export of this software from the United States of America may require + * a specific license from the United States Government. It is the + * responsibility of any person or organization contemplating export to + * obtain such a license before exporting. + * + * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and + * distribute this software and its documentation for any purpose and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation, and that + * the name of FundsXpress. not be used in advertising or publicity pertaining + * to distribution of the software without specific, written prior + * permission. FundsXpress makes no representations about the suitability of + * this software for any purpose. It is provided "as is" without express + * or implied warranty. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +/* + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "internal.h" + +const struct krb5_enctype krb5_aes128_cts_hmac_sha1_96 = { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128, + .name = "aes128-cts-hmac-sha1-96", + .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))", + .cksum_name = "hmac(sha1)", + .hash_name = "sha1", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 12, + .hash_len = 20, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc3961_simplified_profile, +}; + +const struct krb5_enctype krb5_aes256_cts_hmac_sha1_96 = { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256, + .name = "aes256-cts-hmac-sha1-96", + .encrypt_name = "krb5enc(hmac(sha1),cts(cbc(aes)))", + .cksum_name = "hmac(sha1)", + .hash_name = "sha1", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 32, + .Ke_len = 32, + .Ki_len = 32, + .block_len = 16, + .conf_len = 16, + .cksum_len = 12, + .hash_len = 20, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc3961_simplified_profile, +}; diff --git a/crypto/krb5/rfc6803_camellia.c b/crypto/krb5/rfc6803_camellia.c new file mode 100644 index 000000000000..77cd4ce023f1 --- /dev/null +++ b/crypto/krb5/rfc6803_camellia.c @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* rfc6803 Camellia Encryption for Kerberos 5 + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include "internal.h" + +/* + * Calculate the key derivation function KDF-FEEDBACK_CMAC(key, constant) + * + * n = ceiling(k / 128) + * K(0) = zeros + * K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k) + * DR(key, constant) = k-truncate(K(1) | K(2) | ... | K(n)) + * KDF-FEEDBACK-CMAC(key, constant) = random-to-key(DR(key, constant)) + * + * [rfc6803 sec 3] + */ +static int rfc6803_calc_KDF_FEEDBACK_CMAC(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_shash *shash; + struct krb5_buffer K, data; + struct shash_desc *desc; + __be32 tmp; + size_t bsize, offset, seg; + void *buffer; + u32 i = 0, k = result->len * 8; + u8 *p; + int ret = -ENOMEM; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash); + ret = crypto_shash_setkey(shash, key->data, key->len); + if (ret < 0) + goto error_shash; + + ret = -ENOMEM; + K.len = crypto_shash_digestsize(shash); + data.len = K.len + 4 + constant->len + 1 + 4; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(K.len) + + crypto_roundup(data.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto error_shash; + + desc = buffer; + desc->tfm = shash; + + K.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + data.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(K.len); + + p = data.data + K.len + 4; + memcpy(p, constant->data, constant->len); + p += constant->len; + *p++ = 0x00; + tmp = htonl(k); + memcpy(p, &tmp, 4); + p += 4; + + ret = -EINVAL; + if (WARN_ON(p - (u8 *)data.data != data.len)) + goto error; + + offset = 0; + do { + i++; + p = data.data; + memcpy(p, K.data, K.len); + p += K.len; + *(__be32 *)p = htonl(i); + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + ret = crypto_shash_finup(desc, data.data, data.len, K.data); + if (ret < 0) + goto error; + + seg = min_t(size_t, result->len - offset, K.len); + memcpy(result->data + offset, K.data, seg); + offset += seg; + } while (offset < result->len); + +error: + kfree_sensitive(buffer); +error_shash: + crypto_free_shash(shash); + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * Kp = KDF-FEEDBACK-CMAC(protocol-key, "prf") + * PRF = CMAC(Kp, octet-string) + * [rfc6803 sec 6] + */ +static int rfc6803_calc_PRF(const struct krb5_enctype *krb5, + const struct krb5_buffer *protocol_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp) +{ + static const struct krb5_buffer prfconstant = { 3, "prf" }; + struct crypto_shash *shash; + struct krb5_buffer Kp; + struct shash_desc *desc; + size_t bsize; + void *buffer; + int ret; + + Kp.len = krb5->prf_len; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? 
-ENOPKG : PTR_ERR(shash); + + ret = -EINVAL; + if (result->len != crypto_shash_digestsize(shash)) + goto out_shash; + + ret = -ENOMEM; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(Kp.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto out_shash; + + Kp.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + + ret = rfc6803_calc_KDF_FEEDBACK_CMAC(krb5, protocol_key, &prfconstant, + &Kp, gfp); + if (ret < 0) + goto out; + + ret = crypto_shash_setkey(shash, Kp.data, Kp.len); + if (ret < 0) + goto out; + + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto out; + + ret = crypto_shash_finup(desc, octet_string->data, octet_string->len, result->data); + if (ret < 0) + goto out; + +out: + kfree_sensitive(buffer); +out_shash: + crypto_free_shash(shash); + return ret; +} + + +static const struct krb5_crypto_profile rfc6803_crypto_profile = { + .calc_PRF = rfc6803_calc_PRF, + .calc_Kc = rfc6803_calc_KDF_FEEDBACK_CMAC, + .calc_Ke = rfc6803_calc_KDF_FEEDBACK_CMAC, + .calc_Ki = rfc6803_calc_KDF_FEEDBACK_CMAC, + .derive_encrypt_keys = authenc_derive_encrypt_keys, + .load_encrypt_keys = authenc_load_encrypt_keys, + .derive_checksum_key = rfc3961_derive_checksum_key, + .load_checksum_key = rfc3961_load_checksum_key, + .encrypt = krb5_aead_encrypt, + .decrypt = krb5_aead_decrypt, + .get_mic = rfc3961_get_mic, + .verify_mic = rfc3961_verify_mic, +}; + +const struct krb5_enctype krb5_camellia128_cts_cmac = { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA128, + .name = "camellia128-cts-cmac", + .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .cksum_name = "cmac(camellia)", + .hash_name = NULL, + .derivation_enc = "cts(cbc(camellia))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 16, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc6803_crypto_profile, +}; + +const struct krb5_enctype krb5_camellia256_cts_cmac = { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .ctype = KRB5_CKSUMTYPE_CMAC_CAMELLIA256, + .name = "camellia256-cts-cmac", + .encrypt_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .cksum_name = "cmac(camellia)", + .hash_name = NULL, + .derivation_enc = "cts(cbc(camellia))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 32, + .Ke_len = 32, + .Ki_len = 32, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 16, + .prf_len = 16, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc6803_crypto_profile, +}; diff --git a/crypto/krb5/rfc8009_aes2.c b/crypto/krb5/rfc8009_aes2.c new file mode 100644 index 000000000000..d39851fc3a4e --- /dev/null +++ b/crypto/krb5/rfc8009_aes2.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* rfc8009 AES Encryption with HMAC-SHA2 for Kerberos 5 + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <crypto/authenc.h> +#include "internal.h" + +static const struct krb5_buffer rfc8009_no_context = { .len = 0, .data = "" }; + +/* + * Calculate the key derivation function KDF-HMAC-SHA2(key, label, [context,] k) + * + * KDF-HMAC-SHA2(key, label, [context,] k) = k-truncate(K1) + * + * Using the appropriate one of: + * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | k) + * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | k) + * K1 = HMAC-SHA-256(key, 0x00000001 | label | 0x00 | context | k) + * K1 = HMAC-SHA-384(key, 0x00000001 | label | 0x00 | context | k) + * [rfc8009 sec 3] + */ +static int rfc8009_calc_KDF_HMAC_SHA2(const struct krb5_enctype *krb5, + const struct krb5_buffer *key, + const struct krb5_buffer *label, + const struct krb5_buffer *context, + unsigned int k, + struct krb5_buffer *result, + gfp_t gfp) +{ + struct crypto_shash *shash; + struct krb5_buffer K1, data; + struct shash_desc *desc; + __be32 tmp; + size_t bsize; + void *buffer; + u8 *p; + int ret = -ENOMEM; + + if (WARN_ON(result->len != k / 8)) + return -EINVAL; + + shash = crypto_alloc_shash(krb5->cksum_name, 0, 0); + if (IS_ERR(shash)) + return (PTR_ERR(shash) == -ENOENT) ? -ENOPKG : PTR_ERR(shash); + ret = crypto_shash_setkey(shash, key->data, key->len); + if (ret < 0) + goto error_shash; + + ret = -EINVAL; + if (WARN_ON(crypto_shash_digestsize(shash) * 8 < k)) + goto error_shash; + + ret = -ENOMEM; + data.len = 4 + label->len + 1 + context->len + 4; + bsize = krb5_shash_size(shash) + + krb5_digest_size(shash) + + crypto_roundup(data.len); + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + goto error_shash; + + desc = buffer; + desc->tfm = shash; + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + p = data.data = buffer + + krb5_shash_size(shash) + + krb5_digest_size(shash); + *(__be32 *)p = htonl(0x00000001); + p += 4; + memcpy(p, label->data, label->len); + p += label->len; + *p++ = 0; + memcpy(p, context->data, context->len); + p += context->len; + tmp = htonl(k); + memcpy(p, &tmp, 4); + p += 4; + + ret = -EINVAL; + if (WARN_ON(p - (u8 *)data.data != data.len)) + goto error; + + K1.len = crypto_shash_digestsize(shash); + K1.data = buffer + + krb5_shash_size(shash); + + ret = crypto_shash_finup(desc, data.data, data.len, K1.data); + if (ret < 0) + goto error; + + memcpy(result->data, K1.data, result->len); + +error: + kfree_sensitive(buffer); +error_shash: + crypto_free_shash(shash); + return ret; +} + +/* + * Calculate the pseudo-random function, PRF(). + * + * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 256) + * PRF = KDF-HMAC-SHA2(input-key, "prf", octet-string, 384) + * + * The "prfconstant" used in the PRF operation is the three-octet string + * "prf". + * [rfc8009 sec 5] + */ +static int rfc8009_calc_PRF(const struct krb5_enctype *krb5, + const struct krb5_buffer *input_key, + const struct krb5_buffer *octet_string, + struct krb5_buffer *result, + gfp_t gfp) +{ + static const struct krb5_buffer prfconstant = { 3, "prf" }; + + return rfc8009_calc_KDF_HMAC_SHA2(krb5, input_key, &prfconstant, + octet_string, krb5->prf_len * 8, + result, gfp); +} + +/* + * Derive Ke. 
+ * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 128) + * Ke = KDF-HMAC-SHA2(base-key, usage | 0xAA, 256) + * [rfc8009 sec 5] + */ +static int rfc8009_calc_Ke(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant, + &rfc8009_no_context, krb5->key_bytes * 8, + result, gfp); +} + +/* + * Derive Kc/Ki + * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 128) + * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 128) + * Kc = KDF-HMAC-SHA2(base-key, usage | 0x99, 192) + * Ki = KDF-HMAC-SHA2(base-key, usage | 0x55, 192) + * [rfc8009 sec 5] + */ +static int rfc8009_calc_Ki(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_buffer *usage_constant, + struct krb5_buffer *result, + gfp_t gfp) +{ + return rfc8009_calc_KDF_HMAC_SHA2(krb5, base_key, usage_constant, + &rfc8009_no_context, krb5->cksum_len * 8, + result, gfp); +} + +/* + * Apply encryption and checksumming functions to a message. Unlike for + * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first. + */ +static ssize_t rfc8009_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded) +{ + struct aead_request *req; + struct scatterlist bsg[2]; + ssize_t ret, done; + size_t bsize, base_len, secure_offset, secure_len, pad_len, cksum_offset; + void *buffer; + u8 *iv, *ad; + + if (WARN_ON(data_offset != krb5->conf_len)) + return -EINVAL; /* Data is in wrong place */ + + secure_offset = 0; + base_len = krb5->conf_len + data_len; + pad_len = 0; + secure_len = base_len + pad_len; + cksum_offset = secure_len; + if (WARN_ON(cksum_offset + krb5->cksum_len > sg_len)) + return -EFAULT; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead) * 2; + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + req = buffer; + iv = buffer + krb5_aead_size(aead); + ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead); + + /* Insert the confounder into the buffer */ + ret = -EFAULT; + if (!preconfounded) { + get_random_bytes(buffer, krb5->conf_len); + done = sg_pcopy_from_buffer(sg, nr_sg, buffer, krb5->conf_len, + secure_offset); + if (done != krb5->conf_len) + goto error; + } + + /* We may need to pad out to the crypto blocksize. */ + if (pad_len) { + done = sg_zero_buffer(sg, nr_sg, pad_len, data_offset + data_len); + if (done != pad_len) + goto error; + } + + /* We need to include the starting IV in the hash. */ + sg_init_table(bsg, 2); + sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead)); + sg_chain(bsg, 2, sg); + + /* Hash and encrypt the message. */ + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, krb5_aead_ivsize(aead)); + aead_request_set_crypt(req, bsg, bsg, secure_len, iv); + ret = crypto_aead_encrypt(req); + if (ret < 0) + goto error; + + ret = secure_len + krb5->cksum_len; + +error: + kfree_sensitive(buffer); + return ret; +} + +/* + * Apply decryption and checksumming functions to a message. Unlike for + * RFC3961, for RFC8009, we have to chuck the starting IV into the hash first. + * + * The offset and length are updated to reflect the actual content of the + * encrypted region. 
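As an aside, the RFC 8009 message layout that this encrypt/decrypt pair produces and consumes is confounder || ciphertext || HMAC tag, with the tag computed over the starting IV, the confounder and the plaintext. A minimal standalone sketch of the boundary arithmetic the decrypt path performs (plain C for illustration only, using the aes128-cts-hmac-sha256-128 lengths; this is not kernel API)::

  #include <stdio.h>

  int main(void)
  {
          size_t conf_len = 16, cksum_len = 16;   /* aes128-cts-hmac-sha256-128 */
          size_t msg_len = 16 + 21 + 16;          /* confounder + payload + tag */
          size_t offset = 0, len = msg_len;

          if (len < conf_len + cksum_len)
                  return 1;                       /* -EPROTO in the kernel code */

          offset += conf_len;                     /* step over the confounder */
          len -= conf_len + cksum_len;            /* drop confounder and tag */
          printf("plaintext at offset %zu, length %zu\n", offset, len);
          return 0;
  }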
+ */ +static int rfc8009_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len) +{ + struct aead_request *req; + struct scatterlist bsg[2]; + size_t bsize; + void *buffer; + int ret; + u8 *iv, *ad; + + if (WARN_ON(*_offset != 0)) + return -EINVAL; /* Can't set offset on aead */ + + if (*_len < krb5->conf_len + krb5->cksum_len) + return -EPROTO; + + bsize = krb5_aead_size(aead) + + krb5_aead_ivsize(aead) * 2; + buffer = kzalloc(bsize, GFP_NOFS); + if (!buffer) + return -ENOMEM; + + req = buffer; + iv = buffer + krb5_aead_size(aead); + ad = buffer + krb5_aead_size(aead) + krb5_aead_ivsize(aead); + + /* We need to include the starting IV in the hash. */ + sg_init_table(bsg, 2); + sg_set_buf(&bsg[0], ad, krb5_aead_ivsize(aead)); + sg_chain(bsg, 2, sg); + + /* Decrypt the message and verify its checksum. */ + aead_request_set_tfm(req, aead); + aead_request_set_callback(req, 0, NULL, NULL); + aead_request_set_ad(req, krb5_aead_ivsize(aead)); + aead_request_set_crypt(req, bsg, bsg, *_len, iv); + ret = crypto_aead_decrypt(req); + if (ret < 0) + goto error; + + /* Adjust the boundaries of the data. */ + *_offset += krb5->conf_len; + *_len -= krb5->conf_len + krb5->cksum_len; + ret = 0; + +error: + kfree_sensitive(buffer); + return ret; +} + +static const struct krb5_crypto_profile rfc8009_crypto_profile = { + .calc_PRF = rfc8009_calc_PRF, + .calc_Kc = rfc8009_calc_Ki, + .calc_Ke = rfc8009_calc_Ke, + .calc_Ki = rfc8009_calc_Ki, + .derive_encrypt_keys = authenc_derive_encrypt_keys, + .load_encrypt_keys = authenc_load_encrypt_keys, + .derive_checksum_key = rfc3961_derive_checksum_key, + .load_checksum_key = rfc3961_load_checksum_key, + .encrypt = rfc8009_encrypt, + .decrypt = rfc8009_decrypt, + .get_mic = rfc3961_get_mic, + .verify_mic = rfc3961_verify_mic, +}; + +const struct krb5_enctype krb5_aes128_cts_hmac_sha256_128 = { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128, + .name = "aes128-cts-hmac-sha256-128", + .encrypt_name = "authenc(hmac(sha256),cts(cbc(aes)))", + .cksum_name = "hmac(sha256)", + .hash_name = "sha256", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 16, + .key_len = 16, + .Kc_len = 16, + .Ke_len = 16, + .Ki_len = 16, + .block_len = 16, + .conf_len = 16, + .cksum_len = 16, + .hash_len = 20, + .prf_len = 32, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc8009_crypto_profile, +}; + +const struct krb5_enctype krb5_aes256_cts_hmac_sha384_192 = { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .ctype = KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256, + .name = "aes256-cts-hmac-sha384-192", + .encrypt_name = "authenc(hmac(sha384),cts(cbc(aes)))", + .cksum_name = "hmac(sha384)", + .hash_name = "sha384", + .derivation_enc = "cts(cbc(aes))", + .key_bytes = 32, + .key_len = 32, + .Kc_len = 24, + .Ke_len = 32, + .Ki_len = 24, + .block_len = 16, + .conf_len = 16, + .cksum_len = 24, + .hash_len = 20, + .prf_len = 48, + .keyed_cksum = true, + .random_to_key = NULL, /* Identity */ + .profile = &rfc8009_crypto_profile, +}; diff --git a/crypto/krb5/selftest.c b/crypto/krb5/selftest.c new file mode 100644 index 000000000000..2a81a6315a0d --- /dev/null +++ b/crypto/krb5/selftest.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Kerberos library self-testing + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <crypto/skcipher.h> +#include <crypto/hash.h> +#include "internal.h" + +#define VALID(X) \ + ({ \ + bool __x = (X); \ + if (__x) { \ + pr_warn("!!! TESTINVAL %s:%u\n", __FILE__, __LINE__); \ + ret = -EBADMSG; \ + } \ + __x; \ + }) + +#define CHECK(X) \ + ({ \ + bool __x = (X); \ + if (__x) { \ + pr_warn("!!! TESTFAIL %s:%u\n", __FILE__, __LINE__); \ + ret = -EBADMSG; \ + } \ + __x; \ + }) + +enum which_key { + TEST_KC, TEST_KE, TEST_KI, +}; + +#if 0 +static void dump_sg(struct scatterlist *sg, unsigned int limit) +{ + unsigned int index = 0, n = 0; + + for (; sg && limit > 0; sg = sg_next(sg)) { + unsigned int off = sg->offset, len = umin(sg->length, limit); + const void *p = kmap_local_page(sg_page(sg)); + + limit -= len; + while (len > 0) { + unsigned int part = umin(len, 32); + + pr_notice("[%x] %04x: %*phN\n", n, index, part, p + off); + index += part; + off += part; + len -= part; + } + + kunmap_local(p); + n++; + } +} +#endif + +static int prep_buf(struct krb5_buffer *buf) +{ + buf->data = kmalloc(buf->len, GFP_KERNEL); + if (!buf->data) + return -ENOMEM; + return 0; +} + +#define PREP_BUF(BUF, LEN) \ + do { \ + (BUF)->len = (LEN); \ + ret = prep_buf((BUF)); \ + if (ret < 0) \ + goto out; \ + } while (0) + +static int load_buf(struct krb5_buffer *buf, const char *from) +{ + size_t len = strlen(from); + int ret; + + if (len > 1 && from[0] == '\'') { + PREP_BUF(buf, len - 1); + memcpy(buf->data, from + 1, len - 1); + ret = 0; + goto out; + } + + if (VALID(len & 1)) + return -EINVAL; + + PREP_BUF(buf, len / 2); + ret = hex2bin(buf->data, from, buf->len); + if (ret < 0) { + VALID(1); + goto out; + } +out: + return ret; +} + +#define LOAD_BUF(BUF, FROM) do { ret = load_buf(BUF, FROM); if (ret < 0) goto out; } while (0) + +static void clear_buf(struct krb5_buffer *buf) +{ + kfree(buf->data); + buf->len = 0; + buf->data = NULL; +} + +/* + * Perform a pseudo-random function check. + */ +static int krb5_test_one_prf(const struct krb5_prf_test *test) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct krb5_buffer key = {}, octet = {}, result = {}, prf = {}; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + LOAD_BUF(&key, test->key); + LOAD_BUF(&octet, test->octet); + LOAD_BUF(&prf, test->prf); + PREP_BUF(&result, krb5->prf_len); + + if (VALID(result.len != prf.len)) { + ret = -EINVAL; + goto out; + } + + ret = krb5->profile->calc_PRF(krb5, &key, &octet, &result, GFP_KERNEL); + if (ret < 0) { + CHECK(1); + pr_warn("PRF calculation failed %d\n", ret); + goto out; + } + + if (memcmp(result.data, prf.data, result.len) != 0) { + CHECK(1); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&result); + clear_buf(&octet); + clear_buf(&key); + return ret; +} + +/* + * Perform a key derivation check. 
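For the rfc8009 enctypes, the vectors exercised here follow RFC 8009 section 5: the label passed to KDF-HMAC-SHA2 is the 4-octet big-endian key usage followed by one constant octet (0x99 for Kc, 0xAA for Ke, 0x55 for Ki), and the context is empty. (The Camellia vectors use the rfc6803 feedback-CMAC KDF instead.) A sketch of the HMAC input assembled for Kc with usage 2 and k = 128, per the concatenation documented in rfc8009_aes2.c above (standalone C, illustration only)::

  #include <stdio.h>
  #include <string.h>

  static void put_be32(unsigned char *p, unsigned int v)
  {
          p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
  }

  int main(void)
  {
          /* 0x00000001 | label | 0x00 | k, with label = usage | 0x99 */
          unsigned char input[4 + 5 + 1 + 4], *p = input;

          put_be32(p, 1);         p += 4;         /* iteration counter */
          put_be32(p, 2);         p += 4;         /* key usage, big-endian */
          *p++ = 0x99;                            /* 0x99 Kc, 0xAA Ke, 0x55 Ki */
          *p++ = 0x00;                            /* separator octet */
          put_be32(p, 128);       p += 4;         /* output length in bits */

          for (size_t i = 0; i < sizeof(input); i++)
                  printf("%02x", input[i]);
          printf("\n");
          return 0;
  }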
+ */ +static int krb5_test_key(const struct krb5_enctype *krb5, + const struct krb5_buffer *base_key, + const struct krb5_key_test_one *test, + enum which_key which) +{ + struct krb5_buffer key = {}, result = {}; + int ret; + + LOAD_BUF(&key, test->key); + PREP_BUF(&result, key.len); + + switch (which) { + case TEST_KC: + ret = krb5_derive_Kc(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + case TEST_KE: + ret = krb5_derive_Ke(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + case TEST_KI: + ret = krb5_derive_Ki(krb5, base_key, test->use, &result, GFP_KERNEL); + break; + default: + VALID(1); + ret = -EINVAL; + goto out; + } + + if (ret < 0) { + CHECK(1); + pr_warn("Key derivation failed %d\n", ret); + goto out; + } + + if (memcmp(result.data, key.data, result.len) != 0) { + CHECK(1); + ret = -EKEYREJECTED; + goto out; + } + +out: + clear_buf(&key); + clear_buf(&result); + return ret; +} + +static int krb5_test_one_key(const struct krb5_key_test *test) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct krb5_buffer base_key = {}; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + LOAD_BUF(&base_key, test->key); + + ret = krb5_test_key(krb5, &base_key, &test->Kc, TEST_KC); + if (ret < 0) + goto out; + ret = krb5_test_key(krb5, &base_key, &test->Ke, TEST_KE); + if (ret < 0) + goto out; + ret = krb5_test_key(krb5, &base_key, &test->Ki, TEST_KI); + if (ret < 0) + goto out; + +out: + clear_buf(&base_key); + return ret; +} + +/* + * Perform an encryption test. + */ +static int krb5_test_one_enc(const struct krb5_enc_test *test, void *buf) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct crypto_aead *ci = NULL; + struct krb5_buffer K0 = {}, Ke = {}, Ki = {}, keys = {}; + struct krb5_buffer conf = {}, plain = {}, ct = {}; + struct scatterlist sg[1]; + size_t data_len, data_offset, message_len; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + /* Load the test data into binary buffers. */ + LOAD_BUF(&conf, test->conf); + LOAD_BUF(&plain, test->plain); + LOAD_BUF(&ct, test->ct); + + if (test->K0) { + LOAD_BUF(&K0, test->K0); + } else { + LOAD_BUF(&Ke, test->Ke); + LOAD_BUF(&Ki, test->Ki); + + ret = krb5->profile->load_encrypt_keys(krb5, &Ke, &Ki, &keys, GFP_KERNEL); + if (ret < 0) + goto out; + } + + if (VALID(conf.len != krb5->conf_len) || + VALID(ct.len != krb5->conf_len + plain.len + krb5->cksum_len)) + goto out; + + data_len = plain.len; + message_len = crypto_krb5_how_much_buffer(krb5, KRB5_ENCRYPT_MODE, + data_len, &data_offset); + + if (CHECK(message_len != ct.len)) { + pr_warn("Encrypted length mismatch %zu != %u\n", message_len, ct.len); + goto out; + } + if (CHECK(data_offset != conf.len)) { + pr_warn("Data offset mismatch %zu != %u\n", data_offset, conf.len); + goto out; + } + + memcpy(buf, conf.data, conf.len); + memcpy(buf + data_offset, plain.data, plain.len); + + /* Allocate a crypto object and set its key. */ + if (test->K0) + ci = crypto_krb5_prepare_encryption(krb5, &K0, test->usage, GFP_KERNEL); + else + ci = krb5_prepare_encryption(krb5, &keys, GFP_KERNEL); + + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + ci = NULL; + pr_err("Couldn't alloc AEAD %s: %d\n", krb5->encrypt_name, ret); + goto out; + } + + /* Encrypt the message. 
*/ + sg_init_one(sg, buf, message_len); + ret = crypto_krb5_encrypt(krb5, ci, sg, 1, message_len, + data_offset, data_len, true); + if (ret < 0) { + CHECK(1); + pr_warn("Encryption failed %d\n", ret); + goto out; + } + if (ret != message_len) { + CHECK(1); + pr_warn("Encrypted message wrong size %x != %zx\n", ret, message_len); + goto out; + } + + if (memcmp(buf, ct.data, ct.len) != 0) { + CHECK(1); + pr_warn("Ciphertext mismatch\n"); + pr_warn("BUF %*phN\n", ct.len, buf); + pr_warn("CT %*phN\n", ct.len, ct.data); + pr_warn("PT %*phN%*phN\n", conf.len, conf.data, plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + /* Decrypt the encrypted message. */ + data_offset = 0; + data_len = message_len; + ret = crypto_krb5_decrypt(krb5, ci, sg, 1, &data_offset, &data_len); + if (ret < 0) { + CHECK(1); + pr_warn("Decryption failed %d\n", ret); + goto out; + } + + if (CHECK(data_offset != conf.len) || + CHECK(data_len != plain.len)) + goto out; + + if (memcmp(buf, conf.data, conf.len) != 0) { + CHECK(1); + pr_warn("Confounder mismatch\n"); + pr_warn("ENC %*phN\n", conf.len, buf); + pr_warn("DEC %*phN\n", conf.len, conf.data); + ret = -EKEYREJECTED; + goto out; + } + + if (memcmp(buf + conf.len, plain.data, plain.len) != 0) { + CHECK(1); + pr_warn("Plaintext mismatch\n"); + pr_warn("BUF %*phN\n", plain.len, buf + conf.len); + pr_warn("PT %*phN\n", plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&ct); + clear_buf(&plain); + clear_buf(&conf); + clear_buf(&keys); + clear_buf(&Ki); + clear_buf(&Ke); + clear_buf(&K0); + if (ci) + crypto_free_aead(ci); + return ret; +} + +/* + * Perform a checksum test. + */ +static int krb5_test_one_mic(const struct krb5_mic_test *test, void *buf) +{ + const struct krb5_enctype *krb5 = crypto_krb5_find_enctype(test->etype); + struct crypto_shash *ci = NULL; + struct scatterlist sg[1]; + struct krb5_buffer K0 = {}, Kc = {}, keys = {}, plain = {}, mic = {}; + size_t offset, len, message_len; + int ret; + + if (!krb5) + return -EOPNOTSUPP; + + pr_notice("Running %s %s\n", krb5->name, test->name); + + /* Allocate a crypto object and set its key. */ + if (test->K0) { + LOAD_BUF(&K0, test->K0); + ci = crypto_krb5_prepare_checksum(krb5, &K0, test->usage, GFP_KERNEL); + } else { + LOAD_BUF(&Kc, test->Kc); + + ret = krb5->profile->load_checksum_key(krb5, &Kc, &keys, GFP_KERNEL); + if (ret < 0) + goto out; + + ci = krb5_prepare_checksum(krb5, &Kc, GFP_KERNEL); + } + if (IS_ERR(ci)) { + ret = PTR_ERR(ci); + ci = NULL; + pr_err("Couldn't alloc shash %s: %d\n", krb5->cksum_name, ret); + goto out; + } + + /* Load the test data into binary buffers. */ + LOAD_BUF(&plain, test->plain); + LOAD_BUF(&mic, test->mic); + + len = plain.len; + message_len = crypto_krb5_how_much_buffer(krb5, KRB5_CHECKSUM_MODE, + len, &offset); + + if (CHECK(message_len != mic.len + plain.len)) { + pr_warn("MIC length mismatch %zu != %u\n", + message_len, mic.len + plain.len); + goto out; + } + + memcpy(buf + offset, plain.data, plain.len); + + /* Generate a MIC generation request. 
*/ + sg_init_one(sg, buf, 1024); + + ret = crypto_krb5_get_mic(krb5, ci, NULL, sg, 1, 1024, + krb5->cksum_len, plain.len); + if (ret < 0) { + CHECK(1); + pr_warn("Get MIC failed %d\n", ret); + goto out; + } + len = ret; + + if (CHECK(len != plain.len + mic.len)) { + pr_warn("MIC length mismatch %zu != %u\n", len, plain.len + mic.len); + goto out; + } + + if (memcmp(buf, mic.data, mic.len) != 0) { + CHECK(1); + pr_warn("MIC mismatch\n"); + pr_warn("BUF %*phN\n", mic.len, buf); + pr_warn("MIC %*phN\n", mic.len, mic.data); + ret = -EKEYREJECTED; + goto out; + } + + /* Generate a verification request. */ + offset = 0; + ret = crypto_krb5_verify_mic(krb5, ci, NULL, sg, 1, &offset, &len); + if (ret < 0) { + CHECK(1); + pr_warn("Verify MIC failed %d\n", ret); + goto out; + } + + if (CHECK(offset != mic.len) || + CHECK(len != plain.len)) + goto out; + + if (memcmp(buf + offset, plain.data, plain.len) != 0) { + CHECK(1); + pr_warn("Plaintext mismatch\n"); + pr_warn("BUF %*phN\n", plain.len, buf + offset); + pr_warn("PT %*phN\n", plain.len, plain.data); + ret = -EKEYREJECTED; + goto out; + } + + ret = 0; + +out: + clear_buf(&mic); + clear_buf(&plain); + clear_buf(&keys); + clear_buf(&K0); + clear_buf(&Kc); + if (ci) + crypto_free_shash(ci); + return ret; +} + +int krb5_selftest(void) +{ + void *buf; + int ret = 0, i; + + buf = kmalloc(4096, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pr_notice("\n"); + pr_notice("Running selftests\n"); + + for (i = 0; krb5_prf_tests[i].name; i++) { + ret = krb5_test_one_prf(&krb5_prf_tests[i]); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_prf_tests[i].name); + } + } + + for (i = 0; krb5_key_tests[i].name; i++) { + ret = krb5_test_one_key(&krb5_key_tests[i]); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_key_tests[i].name); + } + } + + for (i = 0; krb5_enc_tests[i].name; i++) { + memset(buf, 0x5a, 4096); + ret = krb5_test_one_enc(&krb5_enc_tests[i], buf); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_enc_tests[i].name); + } + } + + for (i = 0; krb5_mic_tests[i].name; i++) { + memset(buf, 0x5a, 4096); + ret = krb5_test_one_mic(&krb5_mic_tests[i], buf); + if (ret < 0) { + if (ret != -EOPNOTSUPP) + goto out; + pr_notice("Skipping %s\n", krb5_mic_tests[i].name); + } + } + + ret = 0; +out: + pr_notice("Selftests %s\n", ret == 0 ? "succeeded" : "failed"); + kfree(buf); + return ret; +} diff --git a/crypto/krb5/selftest_data.c b/crypto/krb5/selftest_data.c new file mode 100644 index 000000000000..24447ee8bf07 --- /dev/null +++ b/crypto/krb5/selftest_data.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Data for Kerberos library self-testing + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "internal.h" + +/* + * Pseudo-random function tests. 
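Both PRF vectors below come from RFC 8009 Appendix A; the octet-string input 74657374 is simply the ASCII text "test". A throwaway decoder for the hex convention used throughout these tables (standalone C, not part of the patch)::

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main(void)
  {
          const char hex[] = "74657374";  /* .octet from the PRF vectors */
          char out[sizeof(hex) / 2 + 1];
          size_t i;

          for (i = 0; hex[i] && hex[i + 1]; i += 2) {
                  char byte[3] = { hex[i], hex[i + 1], 0 };

                  out[i / 2] = (char)strtoul(byte, NULL, 16);
          }
          out[i / 2] = 0;
          printf("%s\n", out);            /* prints "test" */
          return 0;
  }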
+ */ +const struct krb5_prf_test krb5_prf_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "prf", + .key = "3705D96080C17728A0E800EAB6E0D23C", + .octet = "74657374", + .prf = "9D188616F63852FE86915BB840B4A886FF3E6BB0F819B49B893393D393854295", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "prf", + .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52", + .octet = "74657374", + .prf = + "9801F69A368C2BF675E59521E177D9A07F67EFE1CFDE8D3C8D6F6A0256E3B17D" + "B3C1B62AD1B8553360D17367EB1514D2", + }, + {/* END */} +}; + +/* + * Key derivation tests. + */ +const struct krb5_key_test krb5_key_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "key", + .key = "3705D96080C17728A0E800EAB6E0D23C", + .Kc.use = 0x00000002, + .Kc.key = "B31A018A48F54776F403E9A396325DC3", + .Ke.use = 0x00000002, + .Ke.key = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki.use = 0x00000002, + .Ki.key = "9FDA0E56AB2D85E1569A688696C26A6C", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "key", + .key = "6D404D37FAF79F9DF0D33568D320669800EB4836472EA8A026D16B7182460C52", + .Kc.use = 0x00000002, + .Kc.key = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D", + .Ke.use = 0x00000002, + .Ke.key = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki.use = 0x00000002, + .Ki.key = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "key", + .key = "57D0297298FFD9D35DE5A47FB4BDE24B", + .Kc.use = 0x00000002, + .Kc.key = "D155775A209D05F02B38D42A389E5A56", + .Ke.use = 0x00000002, + .Ke.key = "64DF83F85A532F17577D8C37035796AB", + .Ki.use = 0x00000002, + .Ki.key = "3E4FBDF30FB8259C425CB6C96F1F4635", + }, + { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "key", + .key = "B9D6828B2056B7BE656D88A123B1FAC68214AC2B727ECF5F69AFE0C4DF2A6D2C", + .Kc.use = 0x00000002, + .Kc.key = "E467F9A9552BC7D3155A6220AF9C19220EEED4FF78B0D1E6A1544991461A9E50", + .Ke.use = 0x00000002, + .Ke.key = "412AEFC362A7285FC3966C6A5181E7605AE675235B6D549FBFC9AB6630A4C604", + .Ki.use = 0x00000002, + .Ki.key = "FA624FA0E523993FA388AEFDC67E67EBCD8C08E8A0246B1D73B0D1DD9FC582B0", + }, + {/* END */} +}; + +/* + * Encryption tests. 
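Every .ct below obeys the invariant the self-test asserts: ciphertext length equals confounder length plus plaintext length plus checksum length. A sanity check for the aes128 "enc plain<block" vector (standalone C; the 16-byte lengths are taken from the enctype tables earlier in this patch)::

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          const char *ct = "84D7F30754ED987BAB0BF3506BEB09CFB55402CEF7"
                           "E6877CE99E247E52D16ED4421DFDF8976C";
          size_t conf_len = 16, plain_len = 6, cksum_len = 16;

          /* 76 hex digits = 38 bytes = 16 + 6 + 16 */
          printf("%s\n", strlen(ct) / 2 == conf_len + plain_len + cksum_len ?
                 "ok" : "mismatch");
          return 0;
  }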
+ */ +const struct krb5_enc_test krb5_enc_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc no plain", + .plain = "", + .conf = "7E5895EAF2672435BAD817F545A37148", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "EF85FB890BB8472F4DAB20394DCA781DAD877EDA39D50C870C0D5A0A8E48C718", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain<block", + .plain = "000102030405", + .conf = "7BCA285E2FD4130FB55B1A5C83BC5B24", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "84D7F30754ED987BAB0BF3506BEB09CFB55402CEF7E6877CE99E247E52D16ED4421DFDF8976C", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain==block", + .plain = "000102030405060708090A0B0C0D0E0F", + .conf = "56AB21713FF62C0A1457200F6FA9948F", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "3517D640F50DDC8AD3628722B3569D2AE07493FA8263254080EA65C1008E8FC295FB4852E7D83E1E7C48C37EEBE6B0D3", + }, { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "enc plain>block", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .conf = "A7A4E29A4728CE10664FB64E49AD3FAC", + .Ke = "9B197DD1E8C5609D6E67C3E37C62C72E", + .Ki = "9FDA0E56AB2D85E1569A688696C26A6C", + .ct = "720F73B18D9859CD6CCB4346115CD336C70F58EDC0C4437C5573544C31C813BCE1E6D072C186B39A413C2F92CA9B8334A287FFCBFC", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc no plain", + .plain = "", + .conf = "F764E9FA15C276478B2C7D0C4E5F58E4", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "41F53FA5BFE7026D91FAF9BE959195A058707273A96A40F0A01960621AC612748B9BBFBE7EB4CE3C", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain<block", + .plain = "000102030405", + .conf = "B80D3251C1F6471494256FFE712D0B9A", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "4ED7B37C2BCAC8F74F23C1CF07E62BC7B75FB3F637B9F559C7F664F69EAB7B6092237526EA0D1F61CB20D69D10F2", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain==block", + .plain = "000102030405060708090A0B0C0D0E0F", + .conf = "53BF8A0D105265D4E276428624CE5E63", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "BC47FFEC7998EB91E8115CF8D19DAC4BBBE2E163E87DD37F49BECA92027764F68CF51F14D798C2273F35DF574D1F932E40C4FF255B36A266", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "enc plain>block", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .conf = "763E65367E864F02F55153C7E3B58AF1", + .Ke = "56AB22BEE63D82D7BC5227F6773F8EA7A5EB1C825160C38312980C442E5C7E49", + .Ki = "69B16514E3CD8E56B82010D5C73012B622C4D00FFC23ED1F", + .ct = "40013E2DF58E8751957D2878BCD2D6FE101CCFD556CB1EAE79DB3C3EE86429F2B2A602AC86FEF6ECB647D6295FAE077A1FEB517508D2C16B4192E01F62", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc no plain", + .plain = "", + .conf = "B69822A19A6B09C0EBC8557D1F1B6C0A", + .K0 = "1DC46A8D763F4F93742BCBA3387576C3", + .usage = 0, + .ct = "C466F1871069921EDB7C6FDE244A52DB0BA10EDC197BDB8006658CA3CCCE6EB8", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 1 plain", 
+ .plain = "'1", + .conf = "6F2FC3C2A166FD8898967A83DE9596D9", + .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C", + .usage = 1, + .ct = "842D21FD950311C0DD464A3F4BE8D6DA88A56D559C9B47D3F9A85067AF661559B8", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 9 plain", + .plain = "'9 bytesss", + .conf = "A5B4A71E077AEEF93C8763C18FDB1F10", + .K0 = "A1BB61E805F9BA6DDE8FDBDDC05CDEA0", + .usage = 2, + .ct = "619FF072E36286FF0A28DEB3A352EC0D0EDF5C5160D663C901758CCF9D1ED33D71DB8F23AABF8348A0", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 13 plain", + .plain = "'13 bytes byte", + .conf = "19FEE40D810C524B5B22F01874C693DA", + .K0 = "2CA27A5FAF5532244506434E1CEF6676", + .usage = 3, + .ct = "B8ECA3167AE6315512E59F98A7C500205E5F63FF3BB389AF1C41A21D640D8615C9ED3FBEB05AB6ACB67689B5EA", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "enc 30 plain", + .plain = "'30 bytes bytes bytes bytes byt", + .conf = "CA7A7AB4BE192DABD603506DB19C39E2", + .K0 = "7824F8C16F83FF354C6BF7515B973F43", + .usage = 4, + .ct = "A26A3905A4FFD5816B7B1E27380D08090C8EC1F304496E1ABDCD2BDCD1DFFC660989E117A713DDBB57A4146C1587CBA4356665591D2240282F5842B105A5", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc no plain", + .plain = "", + .conf = "3CBBD2B45917941067F96599BB98926C", + .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B", + .usage = 0, + .ct = "03886D03310B47A6D8F06D7B94D1DD837ECCE315EF652AFF620859D94A259266", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 1 plain", + .plain = "'1", + .conf = "DEF487FCEBE6DE6346D4DA4521BBA2D2", + .K0 = "1B97FE0A190E2021EB30753E1B6E1E77B0754B1D684610355864104963463833", + .usage = 1, + .ct = "2C9C1570133C99BF6A34BC1B0212002FD194338749DB4135497A347CFCD9D18A12", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 9 plain", + .plain = "'9 bytesss", + .conf = "AD4FF904D34E555384B14100FC465F88", + .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05", + .usage = 2, + .ct = "9C6DE75F812DE7ED0D28B2963557A115640998275B0AF5152709913FF52A2A9C8E63B872F92E64C839", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 13 plain", + .plain = "'13 bytes byte", + .conf = "CF9BCA6DF1144E0C0AF9B8F34C90D514", + .K0 = "B038B132CD8E06612267FAB7170066D88AECCBA0B744BFC60DC89BCA182D0715", + .usage = 3, + .ct = "EEEC85A9813CDC536772AB9B42DEFC5706F726E975DDE05A87EB5406EA324CA185C9986B42AABE794B84821BEE", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "enc 30 plain", + .plain = "'30 bytes bytes bytes bytes byt", + .conf = "644DEF38DA35007275878D216855E228", + .K0 = "CCFCD349BF4C6677E86E4B02B8EAB924A546AC731CF9BF6989B996E7D6BFBBA7", + .usage = 4, + .ct = "0E44680985855F2D1F1812529CA83BFD8E349DE6FD9ADA0BAAA048D68E265FEBF34AD1255A344999AD37146887A6C6845731AC7F46376A0504CD06571474", + }, + {/* END */} +}; + +/* + * Checksum generation tests. 
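As in the tables above, a leading apostrophe marks a vector as a literal byte string rather than hex (see load_buf() in selftest.c). The decode rule, restated as standalone C for clarity::

  #include <stdio.h>
  #include <string.h>

  /* Mirror of the load_buf() convention: 'xyz means the literal bytes
   * "xyz"; anything else is even-length hex.
   */
  static size_t decoded_len(const char *s)
  {
          size_t len = strlen(s);

          if (len > 1 && s[0] == '\'')
                  return len - 1;         /* literal bytes after the quote */
          return len / 2;                 /* hex digit pairs */
  }

  int main(void)
  {
          printf("%zu %zu\n", decoded_len("'abcdefghijk"),
                 decoded_len("D78367186643D67B411CBA9139FC1DEE"));
          return 0;
  }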
+ */ +const struct krb5_mic_test krb5_mic_tests[] = { + /* rfc8009 Appendix A */ + { + .etype = KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128, + .name = "mic", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .Kc = "B31A018A48F54776F403E9A396325DC3", + .mic = "D78367186643D67B411CBA9139FC1DEE", + }, { + .etype = KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192, + .name = "mic", + .plain = "000102030405060708090A0B0C0D0E0F1011121314", + .Kc = "EF5718BE86CC84963D8BBB5031E9F5C4BA41F28FAF69E73D", + .mic = "45EE791567EEFCA37F4AC1E0222DE80D43C3BFA06699672A", + }, + /* rfc6803 sec 10 */ + { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "mic abc", + .plain = "'abcdefghijk", + .K0 = "1DC46A8D763F4F93742BCBA3387576C3", + .usage = 7, + .mic = "1178E6C5C47A8C1AE0C4B9C7D4EB7B6B", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC, + .name = "mic ABC", + .plain = "'ABCDEFGHIJKLMNOPQRSTUVWXYZ", + .K0 = "5027BC231D0F3A9D23333F1CA6FDBE7C", + .usage = 8, + .mic = "D1B34F7004A731F23A0C00BF6C3F753A", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "mic 123", + .plain = "'123456789", + .K0 = "B61C86CC4E5D2757545AD423399FB7031ECAB913CBB900BD7A3C6DD8BF92015B", + .usage = 9, + .mic = "87A12CFD2B96214810F01C826E7744B1", + }, { + .etype = KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC, + .name = "mic !@#", + .plain = "'!@#$%^&*()!@#$%^&*()!@#$%^&*()", + .K0 = "32164C5B434D1D1538E4CFD9BE8040FE8C4AC7ACC4B93D3314D2133668147A05", + .usage = 10, + .mic = "3FA0B42355E52B189187294AA252AB64", + }, + {/* END */} +}; diff --git a/crypto/krb5enc.c b/crypto/krb5enc.c new file mode 100644 index 000000000000..d07769bf149e --- /dev/null +++ b/crypto/krb5enc.c @@ -0,0 +1,504 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * AEAD wrapper for Kerberos 5 RFC3961 simplified profile. + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * Derived from authenc: + * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au> + */ + +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <crypto/authenc.h> +#include <crypto/scatterwalk.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/rtnetlink.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +struct krb5enc_instance_ctx { + struct crypto_ahash_spawn auth; + struct crypto_skcipher_spawn enc; + unsigned int reqoff; +}; + +struct krb5enc_ctx { + struct crypto_ahash *auth; + struct crypto_skcipher *enc; +}; + +struct krb5enc_request_ctx { + struct scatterlist src[2]; + struct scatterlist dst[2]; + char tail[]; +}; + +static void krb5enc_request_complete(struct aead_request *req, int err) +{ + if (err != -EINPROGRESS) + aead_request_complete(req, err); +} + +/** + * crypto_krb5enc_extractkeys - Extract Ke and Ki keys from the key blob. + * @keys: Where to put the key sizes and pointers + * @key: Encoded key material + * @keylen: Amount of key material + * + * Decode the key blob we're given. It starts with an rtattr that indicates + * the format and the length. Format CRYPTO_AUTHENC_KEYA_PARAM is: + * + * rtattr || __be32 enckeylen || authkey || enckey + * + * Note that the rtattr is in cpu-endian form, unlike enckeylen. This must be + * handled correctly in static testmgr data. 
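A userspace sketch of that key-blob layout may help; the rtattr header is hand-rolled here purely for illustration (the kernel proper uses struct rtattr and the RTA_*() helpers, and CRYPTO_AUTHENC_KEYA_PARAM is the value 1)::

  #include <stdio.h>
  #include <string.h>
  #include <arpa/inet.h>

  struct fake_rtattr {                    /* stand-in for struct rtattr */
          unsigned short rta_len;         /* header + payload, cpu-endian */
          unsigned short rta_type;        /* CRYPTO_AUTHENC_KEYA_PARAM (1) */
  };

  int main(void)
  {
          unsigned char authkey[32] = { 0xaa }, enckey[16] = { 0xbb };
          unsigned char blob[128], *p = blob;
          struct fake_rtattr rta = { sizeof(rta) + 4, 1 };
          unsigned int enckeylen = htonl(sizeof(enckey)); /* big-endian! */

          memcpy(p, &rta, sizeof(rta));           p += sizeof(rta);
          memcpy(p, &enckeylen, 4);               p += 4;
          memcpy(p, authkey, sizeof(authkey));    p += sizeof(authkey);
          memcpy(p, enckey, sizeof(enckey));      p += sizeof(enckey);

          printf("%zu-byte key blob\n", (size_t)(p - blob));
          return 0;
  }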
+ */ +int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen) +{ + struct rtattr *rta = (struct rtattr *)key; + struct crypto_authenc_key_param *param; + + if (!RTA_OK(rta, keylen)) + return -EINVAL; + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + return -EINVAL; + + /* + * RTA_OK() didn't align the rtattr's payload when validating that it + * fits in the buffer. Yet, the keys should start on the next 4-byte + * aligned boundary. To avoid confusion, require that the rtattr + * payload be exactly the param struct, which has a 4-byte aligned size. + */ + if (RTA_PAYLOAD(rta) != sizeof(*param)) + return -EINVAL; + BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO); + + param = RTA_DATA(rta); + keys->enckeylen = be32_to_cpu(param->enckeylen); + + key += rta->rta_len; + keylen -= rta->rta_len; + + if (keylen < keys->enckeylen) + return -EINVAL; + + keys->authkeylen = keylen - keys->enckeylen; + keys->authkey = key; + keys->enckey = key + keys->authkeylen; + return 0; +} +EXPORT_SYMBOL(crypto_krb5enc_extractkeys); + +static int krb5enc_setkey(struct crypto_aead *krb5enc, const u8 *key, + unsigned int keylen) +{ + struct crypto_authenc_keys keys; + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct crypto_skcipher *enc = ctx->enc; + struct crypto_ahash *auth = ctx->auth; + unsigned int flags = crypto_aead_get_flags(krb5enc); + int err = -EINVAL; + + if (crypto_krb5enc_extractkeys(&keys, key, keylen) != 0) + goto out; + + crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK); + crypto_ahash_set_flags(auth, flags & CRYPTO_TFM_REQ_MASK); + err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen); + if (err) + goto out; + + crypto_skcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK); + crypto_skcipher_set_flags(enc, flags & CRYPTO_TFM_REQ_MASK); + err = crypto_skcipher_setkey(enc, keys.enckey, keys.enckeylen); +out: + memzero_explicit(&keys, sizeof(keys)); + return err; +} + +static void krb5enc_encrypt_done(void *data, int err) +{ + struct aead_request *req = data; + + krb5enc_request_complete(req, err); +} + +/* + * Start the encryption of the plaintext. We skip over the associated data as + * that only gets included in the hash. + */ +static int krb5enc_dispatch_encrypt(struct aead_request *req, + unsigned int flags) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct crypto_skcipher *enc = ctx->enc; + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); + struct scatterlist *src, *dst; + + src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); + if (req->src == req->dst) + dst = src; + else + dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); + + skcipher_request_set_tfm(skreq, enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + krb5enc_encrypt_done, req); + skcipher_request_set_crypt(skreq, src, dst, req->cryptlen, req->iv); + + return crypto_skcipher_encrypt(skreq); +} + +/* + * Insert the hash into the checksum field in the destination buffer directly + * after the encrypted region. 
+ */ +static void krb5enc_insert_checksum(struct aead_request *req, u8 *hash) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + + scatterwalk_map_and_copy(hash, req->dst, + req->assoclen + req->cryptlen, + crypto_aead_authsize(krb5enc), 1); +} + +/* + * Upon completion of an asynchronous digest, transfer the hash to the checksum + * field. + */ +static void krb5enc_encrypt_ahash_done(void *data, int err) +{ + struct aead_request *req = data; + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + + if (err) + return krb5enc_request_complete(req, err); + + krb5enc_insert_checksum(req, ahreq->result); + + err = krb5enc_dispatch_encrypt(req, 0); + if (err != -EINPROGRESS) + aead_request_complete(req, err); +} + +/* + * Start the digest of the plaintext for encryption. In theory, this could be + * run in parallel with the encryption, provided the src and dst buffers don't + * overlap. + */ +static int krb5enc_dispatch_encrypt_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct crypto_ahash *auth = ctx->auth; + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + u8 *hash = areq_ctx->tail; + int err; + + ahash_request_set_callback(ahreq, aead_request_flags(req), + krb5enc_encrypt_ahash_done, req); + ahash_request_set_tfm(ahreq, auth); + ahash_request_set_crypt(ahreq, req->src, hash, req->assoclen + req->cryptlen); + + err = crypto_ahash_digest(ahreq); + if (err) + return err; + + krb5enc_insert_checksum(req, hash); + return 0; +} + +/* + * Process an encryption operation. We can perform the cipher and the hash in + * parallel, provided the src and dst buffers are separate. + */ +static int krb5enc_encrypt(struct aead_request *req) +{ + int err; + + err = krb5enc_dispatch_encrypt_hash(req); + if (err < 0) + return err; + + return krb5enc_dispatch_encrypt(req, aead_request_flags(req)); +} + +static int krb5enc_verify_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + unsigned int authsize = crypto_aead_authsize(krb5enc); + u8 *calc_hash = areq_ctx->tail; + u8 *msg_hash = areq_ctx->tail + authsize; + + scatterwalk_map_and_copy(msg_hash, req->src, ahreq->nbytes, authsize, 0); + + if (crypto_memneq(msg_hash, calc_hash, authsize)) + return -EBADMSG; + return 0; +} + +static void krb5enc_decrypt_hash_done(void *data, int err) +{ + struct aead_request *req = data; + + if (err) + return krb5enc_request_complete(req, err); + + err = krb5enc_verify_hash(req); + krb5enc_request_complete(req, err); +} + +/* + * Dispatch the hashing of the plaintext after we've done the decryption. 
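That is, on the decrypt side the cipher runs first and the checksum is then recomputed over the associated data plus the recovered plaintext and compared with the transmitted value, matching the RFC 3961 construction in which the MAC covers the plaintext. The offsets involved, as a standalone sketch with illustrative sizes::

  #include <stdio.h>

  int main(void)
  {
          size_t assoclen = 16, cryptlen = 53, authsize = 16;

          /* Digest the assoc data plus the decrypted payload; the
           * transmitted checksum sits immediately after the hashed
           * region in the source buffer.
           */
          size_t hashed = assoclen + cryptlen - authsize;

          printf("digest %zu bytes, tag at offset %zu\n", hashed, hashed);
          return 0;
  }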
+ */ +static int krb5enc_dispatch_decrypt_hash(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct ahash_request *ahreq = (void *)(areq_ctx->tail + ictx->reqoff); + struct crypto_ahash *auth = ctx->auth; + unsigned int authsize = crypto_aead_authsize(krb5enc); + u8 *hash = areq_ctx->tail; + int err; + + ahash_request_set_tfm(ahreq, auth); + ahash_request_set_crypt(ahreq, req->dst, hash, + req->assoclen + req->cryptlen - authsize); + ahash_request_set_callback(ahreq, aead_request_flags(req), + krb5enc_decrypt_hash_done, req); + + err = crypto_ahash_digest(ahreq); + if (err < 0) + return err; + + return krb5enc_verify_hash(req); +} + +/* + * Dispatch the decryption of the ciphertext. + */ +static int krb5enc_dispatch_decrypt(struct aead_request *req) +{ + struct crypto_aead *krb5enc = crypto_aead_reqtfm(req); + struct aead_instance *inst = aead_alg_instance(krb5enc); + struct krb5enc_ctx *ctx = crypto_aead_ctx(krb5enc); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_request_ctx *areq_ctx = aead_request_ctx(req); + struct skcipher_request *skreq = (void *)(areq_ctx->tail + + ictx->reqoff); + unsigned int authsize = crypto_aead_authsize(krb5enc); + struct scatterlist *src, *dst; + + src = scatterwalk_ffwd(areq_ctx->src, req->src, req->assoclen); + dst = src; + + if (req->src != req->dst) + dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen); + + skcipher_request_set_tfm(skreq, ctx->enc); + skcipher_request_set_callback(skreq, aead_request_flags(req), + req->base.complete, req->base.data); + skcipher_request_set_crypt(skreq, src, dst, + req->cryptlen - authsize, req->iv); + + return crypto_skcipher_decrypt(skreq); +} + +static int krb5enc_decrypt(struct aead_request *req) +{ + int err; + + err = krb5enc_dispatch_decrypt(req); + if (err < 0) + return err; + + return krb5enc_dispatch_decrypt_hash(req); +} + +static int krb5enc_init_tfm(struct crypto_aead *tfm) +{ + struct aead_instance *inst = aead_alg_instance(tfm); + struct krb5enc_instance_ctx *ictx = aead_instance_ctx(inst); + struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm); + struct crypto_ahash *auth; + struct crypto_skcipher *enc; + int err; + + auth = crypto_spawn_ahash(&ictx->auth); + if (IS_ERR(auth)) + return PTR_ERR(auth); + + enc = crypto_spawn_skcipher(&ictx->enc); + err = PTR_ERR(enc); + if (IS_ERR(enc)) + goto err_free_ahash; + + ctx->auth = auth; + ctx->enc = enc; + + crypto_aead_set_reqsize( + tfm, + sizeof(struct krb5enc_request_ctx) + + ictx->reqoff + /* Space for two checksums */ + umax(sizeof(struct ahash_request) + crypto_ahash_reqsize(auth), + sizeof(struct skcipher_request) + crypto_skcipher_reqsize(enc))); + + return 0; + +err_free_ahash: + crypto_free_ahash(auth); + return err; +} + +static void krb5enc_exit_tfm(struct crypto_aead *tfm) +{ + struct krb5enc_ctx *ctx = crypto_aead_ctx(tfm); + + crypto_free_ahash(ctx->auth); + crypto_free_skcipher(ctx->enc); +} + +static void krb5enc_free(struct aead_instance *inst) +{ + struct krb5enc_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_skcipher(&ctx->enc); + crypto_drop_ahash(&ctx->auth); + kfree(inst); +} + +/* + * Create an instance of a template for a specific hash and cipher pair. 
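Once registered, an instance can be requested by name like any other AEAD; for example, from userspace via AF_ALG (assuming this template, the underlying algorithms and the user-space crypto API are all enabled in the running kernel)::

  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/socket.h>
  #include <linux/if_alg.h>

  int main(void)
  {
          struct sockaddr_alg sa = {
                  .salg_family = AF_ALG,
                  .salg_type = "aead",
                  .salg_name = "krb5enc(cmac(camellia),cts(cbc(camellia)))",
          };
          int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

          if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa))) {
                  perror("krb5enc");      /* template or ciphers missing */
                  return 1;
          }
          printf("instantiated %s\n", sa.salg_name);
          close(tfmfd);
          return 0;
  }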
+ */ +static int krb5enc_create(struct crypto_template *tmpl, struct rtattr **tb) +{ + struct krb5enc_instance_ctx *ictx; + struct skcipher_alg_common *enc; + struct hash_alg_common *auth; + struct aead_instance *inst; + struct crypto_alg *auth_base; + u32 mask; + int err; + + err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); + if (err) { + pr_err("attr_type failed\n"); + return err; + } + + inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); + if (!inst) + return -ENOMEM; + ictx = aead_instance_ctx(inst); + + err = crypto_grab_ahash(&ictx->auth, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[1]), 0, mask); + if (err) { + pr_err("grab ahash failed\n"); + goto err_free_inst; + } + auth = crypto_spawn_ahash_alg(&ictx->auth); + auth_base = &auth->base; + + err = crypto_grab_skcipher(&ictx->enc, aead_crypto_instance(inst), + crypto_attr_alg_name(tb[2]), 0, mask); + if (err) { + pr_err("grab skcipher failed\n"); + goto err_free_inst; + } + enc = crypto_spawn_skcipher_alg_common(&ictx->enc); + + ictx->reqoff = 2 * auth->digestsize; + + err = -ENAMETOOLONG; + if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, + "krb5enc(%s,%s)", auth_base->cra_name, + enc->base.cra_name) >= + CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, + "krb5enc(%s,%s)", auth_base->cra_driver_name, + enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) + goto err_free_inst; + + inst->alg.base.cra_priority = enc->base.cra_priority * 10 + + auth_base->cra_priority; + inst->alg.base.cra_blocksize = enc->base.cra_blocksize; + inst->alg.base.cra_alignmask = enc->base.cra_alignmask; + inst->alg.base.cra_ctxsize = sizeof(struct krb5enc_ctx); + + inst->alg.ivsize = enc->ivsize; + inst->alg.chunksize = enc->chunksize; + inst->alg.maxauthsize = auth->digestsize; + + inst->alg.init = krb5enc_init_tfm; + inst->alg.exit = krb5enc_exit_tfm; + + inst->alg.setkey = krb5enc_setkey; + inst->alg.encrypt = krb5enc_encrypt; + inst->alg.decrypt = krb5enc_decrypt; + + inst->free = krb5enc_free; + + err = aead_register_instance(tmpl, inst); + if (err) { + pr_err("ref failed\n"); + goto err_free_inst; + } + + return 0; + +err_free_inst: + krb5enc_free(inst); + return err; +} + +static struct crypto_template crypto_krb5enc_tmpl = { + .name = "krb5enc", + .create = krb5enc_create, + .module = THIS_MODULE, +}; + +static int __init crypto_krb5enc_module_init(void) +{ + return crypto_register_template(&crypto_krb5enc_tmpl); +} + +static void __exit crypto_krb5enc_module_exit(void) +{ + crypto_unregister_template(&crypto_krb5enc_tmpl); +} + +subsys_initcall(crypto_krb5enc_module_init); +module_exit(crypto_krb5enc_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Simple AEAD wrapper for Kerberos 5 RFC3961"); +MODULE_ALIAS_CRYPTO("krb5enc"); diff --git a/crypto/lrw.c b/crypto/lrw.c index e216fbf2b786..391ae0f7641f 100644 --- a/crypto/lrw.c +++ b/crypto/lrw.c @@ -167,7 +167,7 @@ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) while (w.nbytes) { unsigned int avail = w.nbytes; - be128 *wsrc; + const be128 *wsrc; be128 *wdst; wsrc = w.src.virt.addr; diff --git a/crypto/lz4.c b/crypto/lz4.c index 0606f8862e78..82588607fb2e 100644 --- a/crypto/lz4.c +++ b/crypto/lz4.c @@ -16,7 +16,7 @@ struct lz4_ctx { void *lz4_comp_mem; }; -static void *lz4_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4_alloc_ctx(void) { void *ctx; @@ -27,29 +27,11 @@ static void *lz4_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int 
lz4_init(struct crypto_tfm *tfm) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lz4_comp_mem = lz4_alloc_ctx(NULL); - if (IS_ERR(ctx->lz4_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lz4_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4_free_ctx(void *ctx) { vfree(ctx); } -static void lz4_exit(struct crypto_tfm *tfm) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - lz4_free_ctx(NULL, ctx->lz4_comp_mem); -} - static int __lz4_compress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -70,14 +52,6 @@ static int lz4_scompress(struct crypto_scomp *tfm, const u8 *src, return __lz4_compress_crypto(src, slen, dst, dlen, ctx); } -static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lz4_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lz4_compress_crypto(src, slen, dst, dlen, ctx->lz4_comp_mem); -} - static int __lz4_decompress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -97,26 +71,6 @@ static int lz4_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); } -static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - return __lz4_decompress_crypto(src, slen, dst, dlen, NULL); -} - -static struct crypto_alg alg_lz4 = { - .cra_name = "lz4", - .cra_driver_name = "lz4-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lz4_ctx), - .cra_module = THIS_MODULE, - .cra_init = lz4_init, - .cra_exit = lz4_exit, - .cra_u = { .compress = { - .coa_compress = lz4_compress_crypto, - .coa_decompress = lz4_decompress_crypto } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lz4_alloc_ctx, .free_ctx = lz4_free_ctx, @@ -131,24 +85,11 @@ static struct scomp_alg scomp = { static int __init lz4_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg_lz4); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg_lz4); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lz4_mod_fini(void) { - crypto_unregister_alg(&alg_lz4); crypto_unregister_scomp(&scomp); } diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c index d7cc94aa2fcf..997e76c0183a 100644 --- a/crypto/lz4hc.c +++ b/crypto/lz4hc.c @@ -4,18 +4,17 @@ * * Copyright (c) 2013 Chanho Min <chanho.min@lge.com> */ +#include <crypto/internal/scompress.h> #include <linux/init.h> #include <linux/module.h> -#include <linux/crypto.h> #include <linux/vmalloc.h> #include <linux/lz4.h> -#include <crypto/internal/scompress.h> struct lz4hc_ctx { void *lz4hc_comp_mem; }; -static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) +static void *lz4hc_alloc_ctx(void) { void *ctx; @@ -26,29 +25,11 @@ static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lz4hc_init(struct crypto_tfm *tfm) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL); - if (IS_ERR(ctx->lz4hc_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lz4hc_free_ctx(void *ctx) { vfree(ctx); } -static void lz4hc_exit(struct crypto_tfm *tfm) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem); -} - static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, 
void *ctx) { @@ -69,16 +50,6 @@ static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src, return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx); } -static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lz4hc_compress_crypto(src, slen, dst, dlen, - ctx->lz4hc_comp_mem); -} - static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -98,26 +69,6 @@ static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); } -static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, - unsigned int *dlen) -{ - return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL); -} - -static struct crypto_alg alg_lz4hc = { - .cra_name = "lz4hc", - .cra_driver_name = "lz4hc-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lz4hc_ctx), - .cra_module = THIS_MODULE, - .cra_init = lz4hc_init, - .cra_exit = lz4hc_exit, - .cra_u = { .compress = { - .coa_compress = lz4hc_compress_crypto, - .coa_decompress = lz4hc_decompress_crypto } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lz4hc_alloc_ctx, .free_ctx = lz4hc_free_ctx, @@ -132,24 +83,11 @@ static struct scomp_alg scomp = { static int __init lz4hc_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg_lz4hc); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg_lz4hc); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lz4hc_mod_fini(void) { - crypto_unregister_alg(&alg_lz4hc); crypto_unregister_scomp(&scomp); } diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index 0631d975bfac..b1350ae278b8 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -3,19 +3,17 @@ * Cryptographic API. 
*/ +#include <crypto/internal/scompress.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> #include <linux/lzo.h> -#include <crypto/internal/scompress.h> +#include <linux/module.h> +#include <linux/slab.h> struct lzorle_ctx { void *lzorle_comp_mem; }; -static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) +static void *lzorle_alloc_ctx(void) { void *ctx; @@ -26,36 +24,18 @@ static void *lzorle_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lzorle_init(struct crypto_tfm *tfm) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lzorle_comp_mem = lzorle_alloc_ctx(NULL); - if (IS_ERR(ctx->lzorle_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lzorle_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzorle_free_ctx(void *ctx) { kvfree(ctx); } -static void lzorle_exit(struct crypto_tfm *tfm) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - lzorle_free_ctx(NULL, ctx->lzorle_comp_mem); -} - static int __lzorle_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; @@ -64,14 +44,6 @@ static int __lzorle_compress(const u8 *src, unsigned int slen, return 0; } -static int lzorle_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lzorle_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lzorle_compress(src, slen, dst, dlen, ctx->lzorle_comp_mem); -} - static int lzorle_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -94,12 +66,6 @@ static int __lzorle_decompress(const u8 *src, unsigned int slen, return 0; } -static int lzorle_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - return __lzorle_decompress(src, slen, dst, dlen); -} - static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -107,19 +73,6 @@ static int lzorle_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lzorle_decompress(src, slen, dst, dlen); } -static struct crypto_alg alg = { - .cra_name = "lzo-rle", - .cra_driver_name = "lzo-rle-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lzorle_ctx), - .cra_module = THIS_MODULE, - .cra_init = lzorle_init, - .cra_exit = lzorle_exit, - .cra_u = { .compress = { - .coa_compress = lzorle_compress, - .coa_decompress = lzorle_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lzorle_alloc_ctx, .free_ctx = lzorle_free_ctx, @@ -134,24 +87,11 @@ static struct scomp_alg scomp = { static int __init lzorle_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lzorle_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } diff --git a/crypto/lzo.c b/crypto/lzo.c index ebda132dd22b..dfe5a07ca35f 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -3,19 +3,17 @@ * Cryptographic API. 
*/ +#include <crypto/internal/scompress.h> #include <linux/init.h> -#include <linux/module.h> -#include <linux/crypto.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> #include <linux/lzo.h> -#include <crypto/internal/scompress.h> +#include <linux/module.h> +#include <linux/slab.h> struct lzo_ctx { void *lzo_comp_mem; }; -static void *lzo_alloc_ctx(struct crypto_scomp *tfm) +static void *lzo_alloc_ctx(void) { void *ctx; @@ -26,36 +24,18 @@ static void *lzo_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int lzo_init(struct crypto_tfm *tfm) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - ctx->lzo_comp_mem = lzo_alloc_ctx(NULL); - if (IS_ERR(ctx->lzo_comp_mem)) - return -ENOMEM; - - return 0; -} - -static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void lzo_free_ctx(void *ctx) { kvfree(ctx); } -static void lzo_exit(struct crypto_tfm *tfm) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - lzo_free_ctx(NULL, ctx->lzo_comp_mem); -} - static int __lzo_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; @@ -64,14 +44,6 @@ static int __lzo_compress(const u8 *src, unsigned int slen, return 0; } -static int lzo_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); - - return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem); -} - static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -94,12 +66,6 @@ static int __lzo_decompress(const u8 *src, unsigned int slen, return 0; } -static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - return __lzo_decompress(src, slen, dst, dlen); -} - static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -107,19 +73,6 @@ static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __lzo_decompress(src, slen, dst, dlen); } -static struct crypto_alg alg = { - .cra_name = "lzo", - .cra_driver_name = "lzo-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct lzo_ctx), - .cra_module = THIS_MODULE, - .cra_init = lzo_init, - .cra_exit = lzo_exit, - .cra_u = { .compress = { - .coa_compress = lzo_compress, - .coa_decompress = lzo_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = lzo_alloc_ctx, .free_ctx = lzo_free_ctx, @@ -134,24 +87,11 @@ static struct scomp_alg scomp = { static int __init lzo_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) { - crypto_unregister_alg(&alg); - return ret; - } - - return ret; + return crypto_register_scomp(&scomp); } static void __exit lzo_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } diff --git a/crypto/pcbc.c b/crypto/pcbc.c index cbfb3ac14b3a..9d2e56d6744a 100644 --- a/crypto/pcbc.c +++ b/crypto/pcbc.c @@ -22,8 +22,8 @@ static int crypto_pcbc_encrypt_segment(struct skcipher_request *req, struct crypto_cipher *tfm) { int bsize = crypto_cipher_blocksize(tfm); + const u8 *src = walk->src.virt.addr; unsigned int nbytes = walk->nbytes; - u8 *src = 
walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; @@ -45,17 +45,17 @@ static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req, { int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE]; do { - memcpy(tmpbuf, src, bsize); - crypto_xor(iv, src, bsize); - crypto_cipher_encrypt_one(tfm, src, iv); - crypto_xor_cpy(iv, tmpbuf, src, bsize); + memcpy(tmpbuf, dst, bsize); + crypto_xor(iv, dst, bsize); + crypto_cipher_encrypt_one(tfm, dst, iv); + crypto_xor_cpy(iv, tmpbuf, dst, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; @@ -89,8 +89,8 @@ static int crypto_pcbc_decrypt_segment(struct skcipher_request *req, struct crypto_cipher *tfm) { int bsize = crypto_cipher_blocksize(tfm); + const u8 *src = walk->src.virt.addr; unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; @@ -112,17 +112,17 @@ static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req, { int bsize = crypto_cipher_blocksize(tfm); unsigned int nbytes = walk->nbytes; - u8 *src = walk->src.virt.addr; + u8 *dst = walk->dst.virt.addr; u8 * const iv = walk->iv; u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32)); do { - memcpy(tmpbuf, src, bsize); - crypto_cipher_decrypt_one(tfm, src, src); - crypto_xor(src, iv, bsize); - crypto_xor_cpy(iv, src, tmpbuf, bsize); + memcpy(tmpbuf, dst, bsize); + crypto_cipher_decrypt_one(tfm, dst, dst); + crypto_xor(dst, iv, bsize); + crypto_xor_cpy(iv, dst, tmpbuf, bsize); - src += bsize; + dst += bsize; } while ((nbytes -= bsize) >= bsize); return nbytes; diff --git a/crypto/proc.c b/crypto/proc.c index 522b27d90d29..82f15b967e85 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -72,9 +72,6 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "max keysize : %u\n", alg->cra_cipher.cia_max_keysize); break; - case CRYPTO_ALG_TYPE_COMPRESS: - seq_printf(m, "type : compression\n"); - break; default: seq_printf(m, "type : unknown\n"); break; diff --git a/crypto/rsassa-pkcs1.c b/crypto/rsassa-pkcs1.c index f68ffd338f48..d01ac75635e0 100644 --- a/crypto/rsassa-pkcs1.c +++ b/crypto/rsassa-pkcs1.c @@ -210,7 +210,7 @@ static int rsassa_pkcs1_sign(struct crypto_sig *tfm, memset(dst, 0, pad_len); } - return 0; + return ctx->key_size; } static int rsassa_pkcs1_verify(struct crypto_sig *tfm, diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 16f6ba896fb6..8225801488d5 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -15,59 +15,103 @@ #include <linux/module.h> #include <linux/scatterlist.h> -static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out) +void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes) { - void *src = out ? buf : sgdata; - void *dst = out ? 
sgdata : buf; + struct scatterlist *sg = walk->sg; - memcpy(dst, src, nbytes); + nbytes += walk->offset - sg->offset; + + while (nbytes > sg->length) { + nbytes -= sg->length; + sg = sg_next(sg); + } + walk->sg = sg; + walk->offset = sg->offset + nbytes; } +EXPORT_SYMBOL_GPL(scatterwalk_skip); -void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out) +inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, + unsigned int nbytes) { - for (;;) { - unsigned int len_this_page = scatterwalk_pagelen(walk); - u8 *vaddr; - - if (len_this_page > nbytes) - len_this_page = nbytes; - - if (out != 2) { - vaddr = scatterwalk_map(walk); - memcpy_dir(buf, vaddr, len_this_page, out); - scatterwalk_unmap(vaddr); - } + do { + unsigned int to_copy; + + to_copy = scatterwalk_next(walk, nbytes); + memcpy(buf, walk->addr, to_copy); + scatterwalk_done_src(walk, to_copy); + buf += to_copy; + nbytes -= to_copy; + } while (nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_from_scatterwalk); - scatterwalk_advance(walk, len_this_page); +inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, + unsigned int nbytes) +{ + do { + unsigned int to_copy; + + to_copy = scatterwalk_next(walk, nbytes); + memcpy(walk->addr, buf, to_copy); + scatterwalk_done_dst(walk, to_copy); + buf += to_copy; + nbytes -= to_copy; + } while (nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_to_scatterwalk); - if (nbytes == len_this_page) - break; +void memcpy_from_sglist(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes) +{ + struct scatter_walk walk; - buf += len_this_page; - nbytes -= len_this_page; + if (unlikely(nbytes == 0)) /* in case sg == NULL */ + return; - scatterwalk_pagedone(walk, out & 1, 1); - } + scatterwalk_start_at_pos(&walk, sg, start); + memcpy_from_scatterwalk(buf, &walk, nbytes); } -EXPORT_SYMBOL_GPL(scatterwalk_copychunks); +EXPORT_SYMBOL_GPL(memcpy_from_sglist); -void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out) +void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, + const void *buf, unsigned int nbytes) { struct scatter_walk walk; - struct scatterlist tmp[2]; - if (!nbytes) + if (unlikely(nbytes == 0)) /* in case sg == NULL */ return; - sg = scatterwalk_ffwd(tmp, sg, start); + scatterwalk_start_at_pos(&walk, sg, start); + memcpy_to_scatterwalk(&walk, buf, nbytes); +} +EXPORT_SYMBOL_GPL(memcpy_to_sglist); + +void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes) +{ + struct scatter_walk swalk; + struct scatter_walk dwalk; + + if (unlikely(nbytes == 0)) /* in case sg == NULL */ + return; - scatterwalk_start(&walk, sg); - scatterwalk_copychunks(buf, &walk, nbytes, out); - scatterwalk_done(&walk, out, 0); + scatterwalk_start(&swalk, src); + scatterwalk_start(&dwalk, dst); + + do { + unsigned int slen, dlen; + unsigned int len; + + slen = scatterwalk_next(&swalk, nbytes); + dlen = scatterwalk_next(&dwalk, nbytes); + len = min(slen, dlen); + memcpy(dwalk.addr, swalk.addr, len); + scatterwalk_done_dst(&dwalk, len); + scatterwalk_done_src(&swalk, len); + nbytes -= len; + } while (nbytes); } -EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy); +EXPORT_SYMBOL_GPL(memcpy_sglist); struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, diff --git a/crypto/scompress.c b/crypto/scompress.c index 1cef6bb06a81..d435d4b24469 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -12,8 +12,10 @@ #include 
<crypto/scatterwalk.h> #include <linux/cryptouser.h> #include <linux/err.h> +#include <linux/highmem.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/overflow.h> #include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/slab.h> @@ -23,9 +25,14 @@ #include "compress.h" +#define SCOMP_SCRATCH_SIZE 65400 + struct scomp_scratch { spinlock_t lock; - void *src; + union { + void *src; + unsigned long saddr; + }; void *dst; }; @@ -66,7 +73,7 @@ static void crypto_scomp_free_scratches(void) for_each_possible_cpu(i) { scratch = per_cpu_ptr(&scomp_scratch, i); - vfree(scratch->src); + free_page(scratch->saddr); vfree(scratch->dst); scratch->src = NULL; scratch->dst = NULL; @@ -79,14 +86,15 @@ static int crypto_scomp_alloc_scratches(void) int i; for_each_possible_cpu(i) { + struct page *page; void *mem; scratch = per_cpu_ptr(&scomp_scratch, i); - mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); - if (!mem) + page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0); + if (!page) goto error; - scratch->src = mem; + scratch->src = page_address(page); mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i)); if (!mem) goto error; @@ -98,13 +106,66 @@ error: return -ENOMEM; } +static void scomp_free_streams(struct scomp_alg *alg) +{ + struct crypto_acomp_stream __percpu *stream = alg->stream; + int i; + + for_each_possible_cpu(i) { + struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i); + + if (!ps->ctx) + break; + + alg->free_ctx(ps->ctx); + } + + free_percpu(stream); +} + +static int scomp_alloc_streams(struct scomp_alg *alg) +{ + struct crypto_acomp_stream __percpu *stream; + int i; + + stream = alloc_percpu(struct crypto_acomp_stream); + if (!stream) + return -ENOMEM; + + for_each_possible_cpu(i) { + struct crypto_acomp_stream *ps = per_cpu_ptr(stream, i); + + ps->ctx = alg->alloc_ctx(); + if (IS_ERR(ps->ctx)) { + scomp_free_streams(alg); + return PTR_ERR(ps->ctx); + } + + spin_lock_init(&ps->lock); + } + + alg->stream = stream; + return 0; +} + static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) { + struct scomp_alg *alg = crypto_scomp_alg(__crypto_scomp_tfm(tfm)); int ret = 0; mutex_lock(&scomp_lock); - if (!scomp_scratch_users++) + if (!alg->stream) { + ret = scomp_alloc_streams(alg); + if (ret) + goto unlock; + } + if (!scomp_scratch_users) { ret = crypto_scomp_alloc_scratches(); + if (ret) + goto unlock; + scomp_scratch_users++; + } +unlock: mutex_unlock(&scomp_lock); return ret; @@ -112,84 +173,144 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm) static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) { + struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch); struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); - void **tfm_ctx = acomp_tfm_ctx(tfm); + struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm); struct crypto_scomp *scomp = *tfm_ctx; - void **ctx = acomp_request_ctx(req); - struct scomp_scratch *scratch; - void *src, *dst; - unsigned int dlen; + struct crypto_acomp_stream *stream; + unsigned int slen = req->slen; + unsigned int dlen = req->dlen; + struct page *spage, *dpage; + unsigned int n; + const u8 *src; + size_t soff; + size_t doff; + u8 *dst; int ret; - if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) + if (!req->src || !slen) return -EINVAL; - if (req->dst && !req->dlen) + if (!req->dst || !dlen) return -EINVAL; - if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) - req->dlen = SCOMP_SCRATCH_SIZE; - - dlen = req->dlen; - - scratch = raw_cpu_ptr(&scomp_scratch); - spin_lock(&scratch->lock); - 
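+	/*
+	 * Map the source directly when we can; otherwise bounce it
+	 * through the per-CPU scratch buffer.
+	 */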
- if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) { - src = page_to_virt(sg_page(req->src)) + req->src->offset; - } else { - scatterwalk_map_and_copy(scratch->src, req->src, 0, - req->slen, 0); + if (acomp_request_src_isvirt(req)) + src = req->svirt; + else { src = scratch->src; + do { + if (acomp_request_src_isfolio(req)) { + spage = folio_page(req->sfolio, 0); + soff = req->soff; + } else if (slen <= req->src->length) { + spage = sg_page(req->src); + soff = req->src->offset; + } else + break; + + spage = nth_page(spage, soff / PAGE_SIZE); + soff = offset_in_page(soff); + + n = slen / PAGE_SIZE; + n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE; + if (PageHighMem(nth_page(spage, n)) && + size_add(soff, slen) > PAGE_SIZE) + break; + src = kmap_local_page(spage) + soff; + } while (0); } - if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst))) - dst = page_to_virt(sg_page(req->dst)) + req->dst->offset; - else + if (acomp_request_dst_isvirt(req)) + dst = req->dvirt; + else { + unsigned int max = SCOMP_SCRATCH_SIZE; + dst = scratch->dst; + do { + if (acomp_request_dst_isfolio(req)) { + dpage = folio_page(req->dfolio, 0); + doff = req->doff; + } else if (dlen <= req->dst->length) { + dpage = sg_page(req->dst); + doff = req->dst->offset; + } else + break; + + dpage = nth_page(dpage, doff / PAGE_SIZE); + doff = offset_in_page(doff); + + n = dlen / PAGE_SIZE; + n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE; + if (PageHighMem(dpage + n) && + size_add(doff, dlen) > PAGE_SIZE) + break; + dst = kmap_local_page(dpage) + doff; + max = dlen; + } while (0); + dlen = min(dlen, max); + } + spin_lock_bh(&scratch->lock); + + if (src == scratch->src) + memcpy_from_sglist(scratch->src, req->src, 0, slen); + + stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream); + spin_lock(&stream->lock); if (dir) - ret = crypto_scomp_compress(scomp, src, req->slen, - dst, &req->dlen, *ctx); + ret = crypto_scomp_compress(scomp, src, slen, + dst, &dlen, stream->ctx); else - ret = crypto_scomp_decompress(scomp, src, req->slen, - dst, &req->dlen, *ctx); - if (!ret) { - if (!req->dst) { - req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL); - if (!req->dst) { - ret = -ENOMEM; - goto out; - } - } else if (req->dlen > dlen) { - ret = -ENOSPC; - goto out; - } - if (dst == scratch->dst) { - scatterwalk_map_and_copy(scratch->dst, req->dst, 0, - req->dlen, 1); - } else { - int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE); - int i; - struct page *dst_page = sg_page(req->dst); - - for (i = 0; i < nr_pages; i++) - flush_dcache_page(dst_page + i); + ret = crypto_scomp_decompress(scomp, src, slen, + dst, &dlen, stream->ctx); + + if (dst == scratch->dst) + memcpy_to_sglist(req->dst, 0, dst, dlen); + + spin_unlock(&stream->lock); + spin_unlock_bh(&scratch->lock); + + req->dlen = dlen; + + if (!acomp_request_dst_isvirt(req) && dst != scratch->dst) { + kunmap_local(dst); + dlen += doff; + for (;;) { + flush_dcache_page(dpage); + if (dlen <= PAGE_SIZE) + break; + dlen -= PAGE_SIZE; + dpage = nth_page(dpage, 1); } } -out: - spin_unlock(&scratch->lock); + if (!acomp_request_src_isvirt(req) && src != scratch->src) + kunmap_local(src); + return ret; } +static int scomp_acomp_chain(struct acomp_req *req, int dir) +{ + struct acomp_req *r2; + int err; + + err = scomp_acomp_comp_decomp(req, dir); + req->base.err = err; + + list_for_each_entry(r2, &req->base.list, base.list) + r2->base.err = scomp_acomp_comp_decomp(r2, dir); + + return err; +} + static int scomp_acomp_compress(struct 
acomp_req *req) { - return scomp_acomp_comp_decomp(req, 1); + return scomp_acomp_chain(req, 1); } static int scomp_acomp_decompress(struct acomp_req *req) { - return scomp_acomp_comp_decomp(req, 0); + return scomp_acomp_chain(req, 0); } static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) @@ -225,46 +346,19 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) crt->compress = scomp_acomp_compress; crt->decompress = scomp_acomp_decompress; - crt->dst_free = sgl_free; - crt->reqsize = sizeof(void *); return 0; } -struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req) -{ - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); - struct crypto_scomp *scomp = *tfm_ctx; - void *ctx; - - ctx = crypto_scomp_alloc_ctx(scomp); - if (IS_ERR(ctx)) { - kfree(req); - return NULL; - } - - *req->__ctx = ctx; - - return req; -} - -void crypto_acomp_scomp_free_ctx(struct acomp_req *req) +static void crypto_scomp_destroy(struct crypto_alg *alg) { - struct crypto_acomp *acomp = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); - struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm); - struct crypto_scomp *scomp = *tfm_ctx; - void *ctx = *req->__ctx; - - if (ctx) - crypto_scomp_free_ctx(scomp, ctx); + scomp_free_streams(__crypto_scomp_alg(alg)); } static const struct crypto_type crypto_scomp_type = { .extsize = crypto_alg_extsize, .init_tfm = crypto_scomp_init_tfm, + .destroy = crypto_scomp_destroy, #ifdef CONFIG_PROC_FS .show = crypto_scomp_show, #endif @@ -277,12 +371,21 @@ static const struct crypto_type crypto_scomp_type = { .tfmsize = offsetof(struct crypto_scomp, base), }; -int crypto_register_scomp(struct scomp_alg *alg) +static void scomp_prepare_alg(struct scomp_alg *alg) { struct crypto_alg *base = &alg->calg.base; comp_prepare_alg(&alg->calg); + base->cra_flags |= CRYPTO_ALG_REQ_CHAIN; +} + +int crypto_register_scomp(struct scomp_alg *alg) +{ + struct crypto_alg *base = &alg->calg.base; + + scomp_prepare_alg(alg); + base->cra_type = &crypto_scomp_type; base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index a9eb2dcf2898..132075a905d9 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -22,6 +22,7 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/string_choices.h> #include <net/netlink.h> #include "skcipher.h" @@ -38,26 +39,6 @@ static const struct crypto_type crypto_skcipher_type; static int skcipher_walk_next(struct skcipher_walk *walk); -static inline void skcipher_map_src(struct skcipher_walk *walk) -{ - walk->src.virt.addr = scatterwalk_map(&walk->in); -} - -static inline void skcipher_map_dst(struct skcipher_walk *walk) -{ - walk->dst.virt.addr = scatterwalk_map(&walk->out); -} - -static inline void skcipher_unmap_src(struct skcipher_walk *walk) -{ - scatterwalk_unmap(walk->src.virt.addr); -} - -static inline void skcipher_unmap_dst(struct skcipher_walk *walk) -{ - scatterwalk_unmap(walk->dst.virt.addr); -} - static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk) { return walk->flags & SKCIPHER_WALK_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; @@ -69,14 +50,6 @@ static inline struct skcipher_alg *__crypto_skcipher_alg( return container_of(alg, struct skcipher_alg, base); } -static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize) -{ - u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1); - - scatterwalk_copychunks(addr, &walk->out, bsize, 1); - return 0; -} - /** * skcipher_walk_done() - finish one step of a skcipher_walk * @walk: the skcipher_walk @@ -111,15 +84,13 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res) if (likely(!(walk->flags & (SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY | SKCIPHER_WALK_DIFF)))) { -unmap_src: - skcipher_unmap_src(walk); + scatterwalk_advance(&walk->in, n); } else if (walk->flags & SKCIPHER_WALK_DIFF) { - skcipher_unmap_dst(walk); - goto unmap_src; + scatterwalk_done_src(&walk->in, n); } else if (walk->flags & SKCIPHER_WALK_COPY) { - skcipher_map_dst(walk); - memcpy(walk->dst.virt.addr, walk->page, n); - skcipher_unmap_dst(walk); + scatterwalk_advance(&walk->in, n); + scatterwalk_map(&walk->out); + memcpy(walk->out.addr, walk->page, n); } else { /* SKCIPHER_WALK_SLOW */ if (res > 0) { /* @@ -131,20 +102,19 @@ unmap_src: res = -EINVAL; total = 0; } else - n = skcipher_done_slow(walk, n); + memcpy_to_scatterwalk(&walk->out, walk->out.addr, n); + goto dst_done; } + scatterwalk_done_dst(&walk->out, n); +dst_done: + if (res > 0) res = 0; walk->total = total; walk->nbytes = 0; - scatterwalk_advance(&walk->in, n); - scatterwalk_advance(&walk->out, n); - scatterwalk_done(&walk->in, 0, total); - scatterwalk_done(&walk->out, 1, total); - if (total) { if (walk->flags & SKCIPHER_WALK_SLEEP) cond_resched(); @@ -174,7 +144,7 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) { unsigned alignmask = walk->alignmask; unsigned n; - u8 *buffer; + void *buffer; if (!walk->buffer) walk->buffer = walk->page; @@ -188,10 +158,11 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) return skcipher_walk_done(walk, -ENOMEM); walk->buffer = buffer; } - walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1); - walk->src.virt.addr = walk->dst.virt.addr; - scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0); + buffer = PTR_ALIGN(buffer, alignmask + 1); + memcpy_from_scatterwalk(buffer, &walk->in, bsize); + walk->out.__addr = buffer; + walk->in.__addr = walk->out.addr; walk->nbytes = bsize; walk->flags |= SKCIPHER_WALK_SLOW; @@ -201,14 +172,18 @@ static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize) static int skcipher_next_copy(struct skcipher_walk *walk) { - u8 *tmp = walk->page; + void *tmp = walk->page; - skcipher_map_src(walk); - memcpy(tmp, walk->src.virt.addr, walk->nbytes); - skcipher_unmap_src(walk); + scatterwalk_map(&walk->in); + memcpy(tmp, walk->in.addr, walk->nbytes); + scatterwalk_unmap(&walk->in); + /* + * walk->in is advanced later when the number of bytes actually + * processed (which might be less than walk->nbytes) is known. 
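+	 * skcipher_walk_done() performs that advance via scatterwalk_advance().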
+ */ - walk->src.virt.addr = tmp; - walk->dst.virt.addr = tmp; + walk->in.__addr = tmp; + walk->out.__addr = tmp; return 0; } @@ -218,15 +193,15 @@ static int skcipher_next_fast(struct skcipher_walk *walk) diff = offset_in_page(walk->in.offset) - offset_in_page(walk->out.offset); - diff |= (u8 *)scatterwalk_page(&walk->in) - - (u8 *)scatterwalk_page(&walk->out); + diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) - + (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT)); - skcipher_map_src(walk); - walk->dst.virt.addr = walk->src.virt.addr; + scatterwalk_map(&walk->out); + walk->in.__addr = walk->out.__addr; if (diff) { walk->flags |= SKCIPHER_WALK_DIFF; - skcipher_map_dst(walk); + scatterwalk_map(&walk->in); } return 0; @@ -305,14 +280,16 @@ static int skcipher_walk_first(struct skcipher_walk *walk) return skcipher_walk_next(walk); } -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, bool atomic) +int skcipher_walk_virt(struct skcipher_walk *__restrict walk, + struct skcipher_request *__restrict req, bool atomic) { - const struct skcipher_alg *alg = - crypto_skcipher_alg(crypto_skcipher_reqtfm(req)); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct skcipher_alg *alg; might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); + alg = crypto_skcipher_alg(tfm); + walk->total = req->cryptlen; walk->nbytes = 0; walk->iv = req->iv; @@ -328,14 +305,9 @@ int skcipher_walk_virt(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - /* - * Accessing 'alg' directly generates better code than using the - * crypto_skcipher_blocksize() and similar helper functions here, as it - * prevents the algorithm pointer from being repeatedly reloaded. - */ - walk->blocksize = alg->base.cra_blocksize; - walk->ivsize = alg->co.ivsize; - walk->alignmask = alg->base.cra_alignmask; + walk->blocksize = crypto_skcipher_blocksize(tfm); + walk->ivsize = crypto_skcipher_ivsize(tfm); + walk->alignmask = crypto_skcipher_alignmask(tfm); if (alg->co.base.cra_type != &crypto_skcipher_type) walk->stride = alg->co.chunksize; @@ -346,10 +318,11 @@ int skcipher_walk_virt(struct skcipher_walk *walk, } EXPORT_SYMBOL_GPL(skcipher_walk_virt); -static int skcipher_walk_aead_common(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { - const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req)); + struct crypto_aead *tfm = crypto_aead_reqtfm(req); walk->nbytes = 0; walk->iv = req->iv; @@ -362,30 +335,20 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, if (unlikely(!walk->total)) return 0; - scatterwalk_start(&walk->in, req->src); - scatterwalk_start(&walk->out, req->dst); - - scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2); - scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2); - - scatterwalk_done(&walk->in, 0, walk->total); - scatterwalk_done(&walk->out, 0, walk->total); + scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen); + scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen); - /* - * Accessing 'alg' directly generates better code than using the - * crypto_aead_blocksize() and similar helper functions here, as it - * prevents the algorithm pointer from being repeatedly reloaded. 
- */ - walk->blocksize = alg->base.cra_blocksize; - walk->stride = alg->chunksize; - walk->ivsize = alg->ivsize; - walk->alignmask = alg->base.cra_alignmask; + walk->blocksize = crypto_aead_blocksize(tfm); + walk->stride = crypto_aead_chunksize(tfm); + walk->ivsize = crypto_aead_ivsize(tfm); + walk->alignmask = crypto_aead_alignmask(tfm); return skcipher_walk_first(walk); } -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { walk->total = req->cryptlen; @@ -393,8 +356,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, } EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic) +int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); @@ -612,7 +576,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg) seq_printf(m, "type : skcipher\n"); seq_printf(m, "async : %s\n", - alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no"); + str_yes_no(alg->cra_flags & CRYPTO_ALG_ASYNC)); seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); @@ -681,6 +645,7 @@ struct crypto_sync_skcipher *crypto_alloc_sync_skcipher( /* Only sync algorithms allowed. */ mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE; + type &= ~(CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE); tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask); diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index 879fc21dcc16..96f4a66be14c 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -716,6 +716,207 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret) return crypto_wait_req(ret, wait); } +struct test_mb_ahash_data { + struct scatterlist sg[XBUFSIZE]; + char result[64]; + struct ahash_request *req; + struct crypto_wait wait; + char *xbuf[XBUFSIZE]; +}; + +static inline int do_mult_ahash_op(struct test_mb_ahash_data *data, u32 num_mb, + int *rc) +{ + int i, err; + + /* Fire up a bunch of concurrent requests */ + err = crypto_ahash_digest(data[0].req); + + /* Wait for all requests to finish */ + err = crypto_wait_req(err, &data[0].wait); + if (num_mb < 2) + return err; + + for (i = 0; i < num_mb; i++) { + rc[i] = ahash_request_err(data[i].req); + if (rc[i]) { + pr_info("concurrent request %d error %d\n", i, rc[i]); + err = rc[i]; + } + } + + return err; +} + +static int test_mb_ahash_jiffies(struct test_mb_ahash_data *data, int blen, + int secs, u32 num_mb) +{ + unsigned long start, end; + int bcount; + int ret = 0; + int *rc; + + rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); + if (!rc) + return -ENOMEM; + + for (start = jiffies, end = start + secs * HZ, bcount = 0; + time_before(jiffies, end); bcount++) { + ret = do_mult_ahash_op(data, num_mb, rc); + if (ret) + goto out; + } + + pr_cont("%d operations in %d seconds (%llu bytes)\n", + bcount * num_mb, secs, (u64)bcount * blen * num_mb); + +out: + kfree(rc); + return ret; +} + +static int test_mb_ahash_cycles(struct test_mb_ahash_data *data, int blen, + u32 num_mb) +{ + unsigned long cycles = 0; + int ret = 0; + int i; + int *rc; + + rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL); + if (!rc) + return -ENOMEM; + + 
/* Warm-up run. */
+	for (i = 0; i < 4; i++) {
+		ret = do_mult_ahash_op(data, num_mb, rc);
+		if (ret)
+			goto out;
+	}
+
+	/* The real thing. */
+	for (i = 0; i < 8; i++) {
+		cycles_t start, end;
+
+		start = get_cycles();
+		ret = do_mult_ahash_op(data, num_mb, rc);
+		end = get_cycles();
+
+		if (ret)
+			goto out;
+
+		cycles += end - start;
+	}
+
+	pr_cont("1 operation in %lu cycles (%d bytes)\n",
+		(cycles + 4) / (8 * num_mb), blen);
+
+out:
+	kfree(rc);
+	return ret;
+}
+
+static void test_mb_ahash_speed(const char *algo, unsigned int secs,
+				struct hash_speed *speed, u32 num_mb)
+{
+	struct test_mb_ahash_data *data;
+	struct crypto_ahash *tfm;
+	unsigned int i, j, k;
+	int ret;
+
+	data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return;
+
+	tfm = crypto_alloc_ahash(algo, 0, 0);
+	if (IS_ERR(tfm)) {
+		pr_err("failed to load transform for %s: %ld\n",
+		       algo, PTR_ERR(tfm));
+		goto free_data;
+	}
+
+	for (i = 0; i < num_mb; ++i) {
+		if (testmgr_alloc_buf(data[i].xbuf))
+			goto out;
+
+		crypto_init_wait(&data[i].wait);
+
+		data[i].req = ahash_request_alloc(tfm, GFP_KERNEL);
+		if (!data[i].req) {
+			pr_err("alg: hash: Failed to allocate request for %s\n",
+			       algo);
+			goto out;
+		}
+
+
+		if (i) {
+			ahash_request_set_callback(data[i].req, 0, NULL, NULL);
+			ahash_request_chain(data[i].req, data[0].req);
+		} else
+			ahash_request_set_callback(data[0].req, 0,
+						   crypto_req_done,
+						   &data[0].wait);
+
+		sg_init_table(data[i].sg, XBUFSIZE);
+		for (j = 0; j < XBUFSIZE; j++) {
+			sg_set_buf(data[i].sg + j, data[i].xbuf[j], PAGE_SIZE);
+			memset(data[i].xbuf[j], 0xff, PAGE_SIZE);
+		}
+	}
+
+	pr_info("\ntesting speed of multibuffer %s (%s)\n", algo,
+		get_driver_name(crypto_ahash, tfm));
+
+	for (i = 0; speed[i].blen != 0; i++) {
+		/* Only digests are tested: chained requests go through ->digest, so skip update-based templates. */
+		if (speed[i].blen != speed[i].plen)
+			continue;
+
+		if (speed[i].blen > XBUFSIZE * PAGE_SIZE) {
+			pr_err("template (%u) too big for tvmem (%lu)\n",
+			       speed[i].blen, XBUFSIZE * PAGE_SIZE);
+			goto out;
+		}
+
+		if (klen)
+			crypto_ahash_setkey(tfm, tvmem[0], klen);
+
+		for (k = 0; k < num_mb; k++)
+			ahash_request_set_crypt(data[k].req, data[k].sg,
+						data[k].result, speed[i].blen);
+
+		pr_info("test%3u "
+			"(%5u byte blocks,%5u bytes per update,%4u updates): ",
+			i, speed[i].blen, speed[i].plen,
+			speed[i].blen / speed[i].plen);
+
+		if (secs) {
+			ret = test_mb_ahash_jiffies(data, speed[i].blen, secs,
+						    num_mb);
+			cond_resched();
+		} else {
+			ret = test_mb_ahash_cycles(data, speed[i].blen, num_mb);
+		}
+
+
+		if (ret) {
+			pr_err("At least one hashing operation failed, ret=%d\n", ret);
+			break;
+		}
+	}
+
+out:
+	ahash_request_free(data[0].req);
+
+	for (k = 0; k < num_mb; ++k)
+		testmgr_free_buf(data[k].xbuf);
+
+	crypto_free_ahash(tfm);
+
+free_data:
+	kfree(data);
+}
+
 static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
				      char *out, int secs)
 {
@@ -2383,6 +2584,36 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
 		test_ahash_speed("sm3", sec, generic_hash_speed_template);
 		if (mode > 400 && mode < 500) break;
 		fallthrough;
+	case 450:
+		test_mb_ahash_speed("sha1", sec, generic_hash_speed_template,
+				    num_mb);
+		if (mode > 400 && mode < 500) break;
+		fallthrough;
+	case 451:
+		test_mb_ahash_speed("sha256", sec, generic_hash_speed_template,
+				    num_mb);
+		if (mode > 400 && mode < 500) break;
+		fallthrough;
+	case 452:
+		test_mb_ahash_speed("sha512", sec, generic_hash_speed_template,
+				    num_mb);
+		if (mode > 400 && mode < 500) break;
+		fallthrough;
+	case 453:
+		test_mb_ahash_speed("sm3", sec, generic_hash_speed_template,
+				    num_mb);
+		if (mode > 400 && mode < 500) break;
+		fallthrough;
+	case 454:
+		test_mb_ahash_speed("streebog256", sec,
+				    generic_hash_speed_template, num_mb);
+		if (mode > 400 && mode < 500) break;
+		fallthrough;
+	case 455:
+		test_mb_ahash_speed("streebog512", sec,
+				    generic_hash_speed_template, num_mb);
+		if (mode > 400 && mode < 500) break;
+		fallthrough;
 	case 499:
 		break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 1e42582bc7f1..d294c5948b67 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -58,6 +58,9 @@ module_param(fuzz_iterations, uint, 0644);
 MODULE_PARM_DESC(fuzz_iterations, "number of fuzz test iterations");
 #endif
 
+/* Multibuffer is unlimited. Set an arbitrary limit for testing. */
+#define MAX_MB_MSGS 16
+
 #ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
 
 /* a perfect nop */
@@ -299,6 +302,13 @@ struct test_sg_division {
  * @key_offset_relative_to_alignmask: if true, add the algorithm's alignmask to
  *				      the @key_offset
  * @finalization_type: what finalization function to use for hashes
+ * @multibuffer: test with multibuffer
+ * @multibuffer_index: random number used to generate the message index to use
+ *		       for multibuffer.
+ * @multibuffer_uneven: test with multibuffer using uneven lengths
+ * @multibuffer_lens: random lengths to make chained requests uneven
+ * @multibuffer_count: random number used to generate the num_msgs parameter
+ *		       for multibuffer
 * @nosimd: execute with SIMD disabled? Requires !CRYPTO_TFM_REQ_MAY_SLEEP.
 *	    This applies to the parts of the operation that aren't controlled
 *	    individually by @nosimd_setkey or @src_divs[].nosimd.
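The multibuffer knobs documented above drive testmgr's new request-chaining coverage. As a quick orientation, here is a minimal, hypothetical sketch (illustrative only, not part of the patch) of the chaining pattern these tests exercise. ahash_request_chain(), ahash_request_err() and the per-request base.err field are the primitives this series adds; tfm setup and the ahash_request_set_crypt() call on each request are omitted.

	struct ahash_request *reqs[MAX_MB_MSGS];	/* assumed already allocated */
	DECLARE_CRYPTO_WAIT(wait);
	int i, err;

	/* Only the head request carries a completion callback. */
	ahash_request_set_callback(reqs[0], CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	for (i = 1; i < MAX_MB_MSGS; i++) {
		ahash_request_set_callback(reqs[i], 0, NULL, NULL);
		ahash_request_chain(reqs[i], reqs[0]);
	}

	/* One submission plus one wait drives the whole chain... */
	err = crypto_wait_req(crypto_ahash_digest(reqs[0]), &wait);

	/* ...and each chained request reports its own outcome. */
	for (i = 1; !err && i < MAX_MB_MSGS; i++)
		err = ahash_request_err(reqs[i]);

The same shape appears in do_mult_ahash_op() in tcrypt.c above and in do_ahash_op() below, which walks req->base.list to collect each chained request's base.err.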
@@ -318,6 +328,11 @@ struct testvec_config {
 	enum finalization_type finalization_type;
 	bool nosimd;
 	bool nosimd_setkey;
+	bool multibuffer;
+	unsigned int multibuffer_index;
+	unsigned int multibuffer_count;
+	bool multibuffer_uneven;
+	unsigned int multibuffer_lens[MAX_MB_MSGS];
 };
 
 #define TESTVEC_CONFIG_NAMELEN 192
@@ -557,6 +572,7 @@ struct test_sglist {
 	char *bufs[XBUFSIZE];
 	struct scatterlist sgl[XBUFSIZE];
 	struct scatterlist sgl_saved[XBUFSIZE];
+	struct scatterlist full_sgl[XBUFSIZE];
 	struct scatterlist *sgl_ptr;
 	unsigned int nents;
 };
@@ -670,6 +686,11 @@ static int build_test_sglist(struct test_sglist *tsgl,
 	sg_mark_end(&tsgl->sgl[tsgl->nents - 1]);
 	tsgl->sgl_ptr = tsgl->sgl;
 	memcpy(tsgl->sgl_saved, tsgl->sgl, tsgl->nents * sizeof(tsgl->sgl[0]));
+
+	sg_init_table(tsgl->full_sgl, XBUFSIZE);
+	for (i = 0; i < XBUFSIZE; i++)
+		sg_set_buf(tsgl->full_sgl + i, tsgl->bufs[i], PAGE_SIZE * 2);
+
 	return 0;
 }
@@ -1146,6 +1167,27 @@ static void generate_random_testvec_config(struct rnd_state *rng,
 		break;
 	}
 
+	if (prandom_bool(rng)) {
+		int i;
+
+		cfg->multibuffer = true;
+		cfg->multibuffer_count = prandom_u32_state(rng);
+		cfg->multibuffer_count %= MAX_MB_MSGS;
+		if (cfg->multibuffer_count++) {
+			cfg->multibuffer_index = prandom_u32_state(rng);
+			cfg->multibuffer_index %= cfg->multibuffer_count;
+		}
+
+		cfg->multibuffer_uneven = prandom_bool(rng);
+		for (i = 0; i < MAX_MB_MSGS; i++)
+			cfg->multibuffer_lens[i] =
+				generate_random_length(rng, PAGE_SIZE * 2 * XBUFSIZE);
+
+		p += scnprintf(p, end - p, " multibuffer(%d/%d%s)",
+			       cfg->multibuffer_index, cfg->multibuffer_count,
+			       cfg->multibuffer_uneven ? "/uneven" : "");
+	}
+
 	if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP)) {
 		if (prandom_bool(rng)) {
 			cfg->nosimd = true;
@@ -1450,6 +1492,7 @@ static int do_ahash_op(int (*op)(struct ahash_request *req),
 			struct ahash_request *req,
 			struct crypto_wait *wait, bool nosimd)
 {
+	struct ahash_request *r2;
 	int err;
 
 	if (nosimd)
@@ -1460,7 +1503,15 @@ static int do_ahash_op(int (*op)(struct ahash_request *req),
 	if (nosimd)
 		crypto_reenable_simd_for_test();
 
-	return crypto_wait_req(err, wait);
+	err = crypto_wait_req(err, wait);
+	if (err)
+		return err;
+
+	list_for_each_entry(r2, &req->base.list, base.list)
+		if (r2->base.err)
+			return r2->base.err;
+
+	return 0;
 }
 
 static int check_nonfinal_ahash_op(const char *op, int err,
@@ -1481,20 +1532,65 @@ static int check_nonfinal_ahash_op(const char *op, int err,
 	return 0;
 }
 
+static void setup_ahash_multibuffer(
+	struct ahash_request *reqs[MAX_MB_MSGS],
+	const struct testvec_config *cfg,
+	struct test_sglist *tsgl)
+{
+	struct scatterlist *sg = tsgl->full_sgl;
+	static u8 trash[HASH_MAX_DIGESTSIZE];
+	struct ahash_request *req = reqs[0];
+	unsigned int num_msgs;
+	unsigned int msg_idx;
+	int i;
+
+	if (!cfg->multibuffer)
+		return;
+
+	num_msgs = cfg->multibuffer_count;
+	if (num_msgs == 1)
+		return;
+
+	msg_idx = cfg->multibuffer_index;
+	for (i = 1; i < num_msgs; i++) {
+		struct ahash_request *r2 = reqs[i];
+		unsigned int nbytes = req->nbytes;
+
+		if (cfg->multibuffer_uneven)
+			nbytes = cfg->multibuffer_lens[i];
+
+		ahash_request_set_callback(r2, req->base.flags, NULL, NULL);
+		ahash_request_set_crypt(r2, sg, trash, nbytes);
+		ahash_request_chain(r2, req);
+	}
+
+	if (msg_idx) {
+		reqs[msg_idx]->src = req->src;
+		reqs[msg_idx]->nbytes = req->nbytes;
+		reqs[msg_idx]->result = req->result;
+		req->src = sg;
+		if (cfg->multibuffer_uneven)
+			req->nbytes = cfg->multibuffer_lens[0];
+		req->result = trash;
+	}
+}
+
 /* Test one hash test vector in one configuration, using the
ahash API */ static int test_ahash_vec_cfg(const struct hash_testvec *vec, const char *vec_name, const struct testvec_config *cfg, - struct ahash_request *req, + struct ahash_request *reqs[MAX_MB_MSGS], struct test_sglist *tsgl, u8 *hashstate) { + struct ahash_request *req = reqs[0]; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); const unsigned int digestsize = crypto_ahash_digestsize(tfm); const unsigned int statesize = crypto_ahash_statesize(tfm); const char *driver = crypto_ahash_driver_name(tfm); const u32 req_flags = CRYPTO_TFM_REQ_MAY_BACKLOG | cfg->req_flags; const struct test_sg_division *divs[XBUFSIZE]; + struct ahash_request *reqi = req; DECLARE_CRYPTO_WAIT(wait); unsigned int i; struct scatterlist *pending_sgl; @@ -1502,6 +1598,9 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, u8 result[HASH_MAX_DIGESTSIZE + TESTMGR_POISON_LEN]; int err; + if (cfg->multibuffer) + reqi = reqs[cfg->multibuffer_index]; + /* Set the key, if specified */ if (vec->ksize) { err = do_setkey(crypto_ahash_setkey, tfm, vec->key, vec->ksize, @@ -1531,7 +1630,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, /* Do the actual hashing */ - testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); + testmgr_poison(reqi->__ctx, crypto_ahash_reqsize(tfm)); testmgr_poison(result, digestsize + TESTMGR_POISON_LEN); if (cfg->finalization_type == FINALIZATION_TYPE_DIGEST || @@ -1540,6 +1639,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); ahash_request_set_crypt(req, tsgl->sgl, result, vec->psize); + setup_ahash_multibuffer(reqs, cfg, tsgl); err = do_ahash_op(crypto_ahash_digest, req, &wait, cfg->nosimd); if (err) { if (err == vec->digest_error) @@ -1561,6 +1661,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); ahash_request_set_crypt(req, NULL, result, 0); + setup_ahash_multibuffer(reqs, cfg, tsgl); err = do_ahash_op(crypto_ahash_init, req, &wait, cfg->nosimd); err = check_nonfinal_ahash_op("init", err, result, digestsize, driver, vec_name, cfg); @@ -1577,6 +1678,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, crypto_req_done, &wait); ahash_request_set_crypt(req, pending_sgl, result, pending_len); + setup_ahash_multibuffer(reqs, cfg, tsgl); err = do_ahash_op(crypto_ahash_update, req, &wait, divs[i]->nosimd); err = check_nonfinal_ahash_op("update", err, @@ -1591,7 +1693,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, /* Test ->export() and ->import() */ testmgr_poison(hashstate + statesize, TESTMGR_POISON_LEN); - err = crypto_ahash_export(req, hashstate); + err = crypto_ahash_export(reqi, hashstate); err = check_nonfinal_ahash_op("export", err, result, digestsize, driver, vec_name, cfg); @@ -1604,8 +1706,8 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, return -EOVERFLOW; } - testmgr_poison(req->__ctx, crypto_ahash_reqsize(tfm)); - err = crypto_ahash_import(req, hashstate); + testmgr_poison(reqi->__ctx, crypto_ahash_reqsize(tfm)); + err = crypto_ahash_import(reqi, hashstate); err = check_nonfinal_ahash_op("import", err, result, digestsize, driver, vec_name, cfg); @@ -1619,6 +1721,7 @@ static int test_ahash_vec_cfg(const struct hash_testvec *vec, ahash_request_set_callback(req, req_flags, crypto_req_done, &wait); ahash_request_set_crypt(req, pending_sgl, result, pending_len); + setup_ahash_multibuffer(reqs, cfg, tsgl); if (cfg->finalization_type == 
FINALIZATION_TYPE_FINAL) { /* finish with update() and final() */ err = do_ahash_op(crypto_ahash_update, req, &wait, cfg->nosimd); @@ -1650,7 +1753,7 @@ result_ready: static int test_hash_vec_cfg(const struct hash_testvec *vec, const char *vec_name, const struct testvec_config *cfg, - struct ahash_request *req, + struct ahash_request *reqs[MAX_MB_MSGS], struct shash_desc *desc, struct test_sglist *tsgl, u8 *hashstate) @@ -1670,11 +1773,12 @@ static int test_hash_vec_cfg(const struct hash_testvec *vec, return err; } - return test_ahash_vec_cfg(vec, vec_name, cfg, req, tsgl, hashstate); + return test_ahash_vec_cfg(vec, vec_name, cfg, reqs, tsgl, hashstate); } static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, - struct ahash_request *req, struct shash_desc *desc, + struct ahash_request *reqs[MAX_MB_MSGS], + struct shash_desc *desc, struct test_sglist *tsgl, u8 *hashstate) { char vec_name[16]; @@ -1686,7 +1790,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, for (i = 0; i < ARRAY_SIZE(default_hash_testvec_configs); i++) { err = test_hash_vec_cfg(vec, vec_name, &default_hash_testvec_configs[i], - req, desc, tsgl, hashstate); + reqs, desc, tsgl, hashstate); if (err) return err; } @@ -1703,7 +1807,7 @@ static int test_hash_vec(const struct hash_testvec *vec, unsigned int vec_num, generate_random_testvec_config(&rng, &cfg, cfgname, sizeof(cfgname)); err = test_hash_vec_cfg(vec, vec_name, &cfg, - req, desc, tsgl, hashstate); + reqs, desc, tsgl, hashstate); if (err) return err; cond_resched(); @@ -1762,11 +1866,12 @@ done: */ static int test_hash_vs_generic_impl(const char *generic_driver, unsigned int maxkeysize, - struct ahash_request *req, + struct ahash_request *reqs[MAX_MB_MSGS], struct shash_desc *desc, struct test_sglist *tsgl, u8 *hashstate) { + struct ahash_request *req = reqs[0]; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); const unsigned int digestsize = crypto_ahash_digestsize(tfm); const unsigned int blocksize = crypto_ahash_blocksize(tfm); @@ -1864,7 +1969,7 @@ static int test_hash_vs_generic_impl(const char *generic_driver, sizeof(cfgname)); err = test_hash_vec_cfg(&vec, vec_name, cfg, - req, desc, tsgl, hashstate); + reqs, desc, tsgl, hashstate); if (err) goto out; cond_resched(); @@ -1882,7 +1987,7 @@ out: #else /* !CONFIG_CRYPTO_MANAGER_EXTRA_TESTS */ static int test_hash_vs_generic_impl(const char *generic_driver, unsigned int maxkeysize, - struct ahash_request *req, + struct ahash_request *reqs[MAX_MB_MSGS], struct shash_desc *desc, struct test_sglist *tsgl, u8 *hashstate) @@ -1929,8 +2034,8 @@ static int __alg_test_hash(const struct hash_testvec *vecs, u32 type, u32 mask, const char *generic_driver, unsigned int maxkeysize) { + struct ahash_request *reqs[MAX_MB_MSGS] = {}; struct crypto_ahash *atfm = NULL; - struct ahash_request *req = NULL; struct crypto_shash *stfm = NULL; struct shash_desc *desc = NULL; struct test_sglist *tsgl = NULL; @@ -1954,12 +2059,14 @@ static int __alg_test_hash(const struct hash_testvec *vecs, } driver = crypto_ahash_driver_name(atfm); - req = ahash_request_alloc(atfm, GFP_KERNEL); - if (!req) { - pr_err("alg: hash: failed to allocate request for %s\n", - driver); - err = -ENOMEM; - goto out; + for (i = 0; i < MAX_MB_MSGS; i++) { + reqs[i] = ahash_request_alloc(atfm, GFP_KERNEL); + if (!reqs[i]) { + pr_err("alg: hash: failed to allocate request for %s\n", + driver); + err = -ENOMEM; + goto out; + } } /* @@ -1995,12 +2102,12 @@ static int __alg_test_hash(const struct hash_testvec 
*vecs, if (fips_enabled && vecs[i].fips_skip) continue; - err = test_hash_vec(&vecs[i], i, req, desc, tsgl, hashstate); + err = test_hash_vec(&vecs[i], i, reqs, desc, tsgl, hashstate); if (err) goto out; cond_resched(); } - err = test_hash_vs_generic_impl(generic_driver, maxkeysize, req, + err = test_hash_vs_generic_impl(generic_driver, maxkeysize, reqs, desc, tsgl, hashstate); out: kfree(hashstate); @@ -2010,7 +2117,12 @@ out: } kfree(desc); crypto_free_shash(stfm); - ahash_request_free(req); + if (reqs[0]) { + ahash_request_set_callback(reqs[0], 0, NULL, NULL); + for (i = 1; i < MAX_MB_MSGS && reqs[i]; i++) + ahash_request_chain(reqs[i], reqs[0]); + ahash_request_free(reqs[0]); + } crypto_free_ahash(atfm); return err; } @@ -3320,139 +3432,54 @@ out: return err; } -static int test_comp(struct crypto_comp *tfm, - const struct comp_testvec *ctemplate, - const struct comp_testvec *dtemplate, - int ctcount, int dtcount) +static int test_acomp(struct crypto_acomp *tfm, + const struct comp_testvec *ctemplate, + const struct comp_testvec *dtemplate, + int ctcount, int dtcount) { - const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm)); - char *output, *decomp_output; + const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); + struct scatterlist *src = NULL, *dst = NULL; + struct acomp_req *reqs[MAX_MB_MSGS] = {}; + char *decomp_out[MAX_MB_MSGS] = {}; + char *output[MAX_MB_MSGS] = {}; + struct crypto_wait wait; + struct acomp_req *req; + int ret = -ENOMEM; unsigned int i; - int ret; - - output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!output) - return -ENOMEM; - decomp_output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!decomp_output) { - kfree(output); - return -ENOMEM; - } - - for (i = 0; i < ctcount; i++) { - int ilen; - unsigned int dlen = COMP_BUF_SIZE; - - memset(output, 0, COMP_BUF_SIZE); - memset(decomp_output, 0, COMP_BUF_SIZE); - - ilen = ctemplate[i].inlen; - ret = crypto_comp_compress(tfm, ctemplate[i].input, - ilen, output, &dlen); - if (ret) { - printk(KERN_ERR "alg: comp: compression failed " - "on test %d for %s: ret=%d\n", i + 1, algo, - -ret); - goto out; - } - - ilen = dlen; - dlen = COMP_BUF_SIZE; - ret = crypto_comp_decompress(tfm, output, - ilen, decomp_output, &dlen); - if (ret) { - pr_err("alg: comp: compression failed: decompress: on test %d for %s failed: ret=%d\n", - i + 1, algo, -ret); - goto out; - } - - if (dlen != ctemplate[i].inlen) { - printk(KERN_ERR "alg: comp: Compression test %d " - "failed for %s: output len = %d\n", i + 1, algo, - dlen); - ret = -EINVAL; - goto out; - } + src = kmalloc_array(MAX_MB_MSGS, sizeof(*src), GFP_KERNEL); + if (!src) + goto out; + dst = kmalloc_array(MAX_MB_MSGS, sizeof(*dst), GFP_KERNEL); + if (!dst) + goto out; - if (memcmp(decomp_output, ctemplate[i].input, - ctemplate[i].inlen)) { - pr_err("alg: comp: compression failed: output differs: on test %d for %s\n", - i + 1, algo); - hexdump(decomp_output, dlen); - ret = -EINVAL; + for (i = 0; i < MAX_MB_MSGS; i++) { + reqs[i] = acomp_request_alloc(tfm); + if (!reqs[i]) goto out; - } - } - - for (i = 0; i < dtcount; i++) { - int ilen; - unsigned int dlen = COMP_BUF_SIZE; - - memset(decomp_output, 0, COMP_BUF_SIZE); - ilen = dtemplate[i].inlen; - ret = crypto_comp_decompress(tfm, dtemplate[i].input, - ilen, decomp_output, &dlen); - if (ret) { - printk(KERN_ERR "alg: comp: decompression failed " - "on test %d for %s: ret=%d\n", i + 1, algo, - -ret); - goto out; - } + acomp_request_set_callback(reqs[i], + CRYPTO_TFM_REQ_MAY_SLEEP | + 
CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + if (i) + acomp_request_chain(reqs[i], reqs[0]); - if (dlen != dtemplate[i].outlen) { - printk(KERN_ERR "alg: comp: Decompression test %d " - "failed for %s: output len = %d\n", i + 1, algo, - dlen); - ret = -EINVAL; + output[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); + if (!output[i]) goto out; - } - if (memcmp(decomp_output, dtemplate[i].output, dlen)) { - printk(KERN_ERR "alg: comp: Decompression test %d " - "failed for %s\n", i + 1, algo); - hexdump(decomp_output, dlen); - ret = -EINVAL; + decomp_out[i] = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); + if (!decomp_out[i]) goto out; - } - } - - ret = 0; - -out: - kfree(decomp_output); - kfree(output); - return ret; -} - -static int test_acomp(struct crypto_acomp *tfm, - const struct comp_testvec *ctemplate, - const struct comp_testvec *dtemplate, - int ctcount, int dtcount) -{ - const char *algo = crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); - unsigned int i; - char *output, *decomp_out; - int ret; - struct scatterlist src, dst; - struct acomp_req *req; - struct crypto_wait wait; - - output = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!output) - return -ENOMEM; - - decomp_out = kmalloc(COMP_BUF_SIZE, GFP_KERNEL); - if (!decomp_out) { - kfree(output); - return -ENOMEM; } for (i = 0; i < ctcount; i++) { unsigned int dlen = COMP_BUF_SIZE; int ilen = ctemplate[i].inlen; void *input_vec; + int j; input_vec = kmemdup(ctemplate[i].input, ilen, GFP_KERNEL); if (!input_vec) { @@ -3460,85 +3487,61 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } - memset(output, 0, dlen); crypto_init_wait(&wait); - sg_init_one(&src, input_vec, ilen); - sg_init_one(&dst, output, dlen); + sg_init_one(src, input_vec, ilen); - req = acomp_request_alloc(tfm); - if (!req) { - pr_err("alg: acomp: request alloc failed for %s\n", - algo); - kfree(input_vec); - ret = -ENOMEM; - goto out; + for (j = 0; j < MAX_MB_MSGS; j++) { + sg_init_one(dst + j, output[j], dlen); + acomp_request_set_params(reqs[j], src, dst + j, ilen, dlen); } - acomp_request_set_params(req, &src, &dst, ilen, dlen); - acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); - + req = reqs[0]; ret = crypto_wait_req(crypto_acomp_compress(req), &wait); if (ret) { pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); kfree(input_vec); - acomp_request_free(req); goto out; } ilen = req->dlen; dlen = COMP_BUF_SIZE; - sg_init_one(&src, output, ilen); - sg_init_one(&dst, decomp_out, dlen); crypto_init_wait(&wait); - acomp_request_set_params(req, &src, &dst, ilen, dlen); - - ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); - if (ret) { - pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", - i + 1, algo, -ret); - kfree(input_vec); - acomp_request_free(req); - goto out; - } - - if (req->dlen != ctemplate[i].inlen) { - pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", - i + 1, algo, req->dlen); - ret = -EINVAL; - kfree(input_vec); - acomp_request_free(req); - goto out; - } - - if (memcmp(input_vec, decomp_out, req->dlen)) { - pr_err("alg: acomp: Compression test %d failed for %s\n", - i + 1, algo); - hexdump(output, req->dlen); - ret = -EINVAL; - kfree(input_vec); - acomp_request_free(req); - goto out; - } + for (j = 0; j < MAX_MB_MSGS; j++) { + sg_init_one(src + j, output[j], ilen); + sg_init_one(dst + j, decomp_out[j], dlen); + acomp_request_set_params(reqs[j], src + j, dst + j, ilen, dlen); + } + + 
crypto_wait_req(crypto_acomp_decompress(req), &wait); + for (j = 0; j < MAX_MB_MSGS; j++) { + ret = reqs[j]->base.err; + if (ret) { + pr_err("alg: acomp: compression failed on test %d (%d) for %s: ret=%d\n", + i + 1, j, algo, -ret); + kfree(input_vec); + goto out; + } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - crypto_init_wait(&wait); - sg_init_one(&src, input_vec, ilen); - acomp_request_set_params(req, &src, NULL, ilen, 0); + if (reqs[j]->dlen != ctemplate[i].inlen) { + pr_err("alg: acomp: Compression test %d (%d) failed for %s: output len = %d\n", + i + 1, j, algo, reqs[j]->dlen); + ret = -EINVAL; + kfree(input_vec); + goto out; + } - ret = crypto_wait_req(crypto_acomp_compress(req), &wait); - if (ret) { - pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n", - i + 1, algo, -ret); - kfree(input_vec); - acomp_request_free(req); - goto out; + if (memcmp(input_vec, decomp_out[j], reqs[j]->dlen)) { + pr_err("alg: acomp: Compression test %d (%d) failed for %s\n", + i + 1, j, algo); + hexdump(output[j], reqs[j]->dlen); + ret = -EINVAL; + kfree(input_vec); + goto out; + } } -#endif kfree(input_vec); - acomp_request_free(req); } for (i = 0; i < dtcount; i++) { @@ -3552,10 +3555,9 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } - memset(output, 0, dlen); crypto_init_wait(&wait); - sg_init_one(&src, input_vec, ilen); - sg_init_one(&dst, output, dlen); + sg_init_one(src, input_vec, ilen); + sg_init_one(dst, output[0], dlen); req = acomp_request_alloc(tfm); if (!req) { @@ -3566,7 +3568,7 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } - acomp_request_set_params(req, &src, &dst, ilen, dlen); + acomp_request_set_params(req, src, dst, ilen, dlen); acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); @@ -3588,30 +3590,16 @@ static int test_acomp(struct crypto_acomp *tfm, goto out; } - if (memcmp(output, dtemplate[i].output, req->dlen)) { + if (memcmp(output[0], dtemplate[i].output, req->dlen)) { pr_err("alg: acomp: Decompression test %d failed for %s\n", i + 1, algo); - hexdump(output, req->dlen); + hexdump(output[0], req->dlen); ret = -EINVAL; kfree(input_vec); acomp_request_free(req); goto out; } -#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS - crypto_init_wait(&wait); - acomp_request_set_params(req, &src, NULL, ilen, 0); - - ret = crypto_wait_req(crypto_acomp_decompress(req), &wait); - if (ret) { - pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n", - i + 1, algo, -ret); - kfree(input_vec); - acomp_request_free(req); - goto out; - } -#endif - kfree(input_vec); acomp_request_free(req); } @@ -3619,8 +3607,13 @@ static int test_acomp(struct crypto_acomp *tfm, ret = 0; out: - kfree(decomp_out); - kfree(output); + acomp_request_free(reqs[0]); + for (i = 0; i < MAX_MB_MSGS; i++) { + kfree(output[i]); + kfree(decomp_out[i]); + } + kfree(dst); + kfree(src); return ret; } @@ -3713,42 +3706,22 @@ static int alg_test_cipher(const struct alg_test_desc *desc, static int alg_test_comp(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { - struct crypto_comp *comp; struct crypto_acomp *acomp; int err; - u32 algo_type = type & CRYPTO_ALG_TYPE_ACOMPRESS_MASK; - - if (algo_type == CRYPTO_ALG_TYPE_ACOMPRESS) { - acomp = crypto_alloc_acomp(driver, type, mask); - if (IS_ERR(acomp)) { - if (PTR_ERR(acomp) == -ENOENT) - return 0; - pr_err("alg: acomp: Failed to load transform for %s: %ld\n", - driver, PTR_ERR(acomp)); - return PTR_ERR(acomp); - } - err = test_acomp(acomp, 
desc->suite.comp.comp.vecs, - desc->suite.comp.decomp.vecs, - desc->suite.comp.comp.count, - desc->suite.comp.decomp.count); - crypto_free_acomp(acomp); - } else { - comp = crypto_alloc_comp(driver, type, mask); - if (IS_ERR(comp)) { - if (PTR_ERR(comp) == -ENOENT) - return 0; - pr_err("alg: comp: Failed to load transform for %s: %ld\n", - driver, PTR_ERR(comp)); - return PTR_ERR(comp); - } - - err = test_comp(comp, desc->suite.comp.comp.vecs, - desc->suite.comp.decomp.vecs, - desc->suite.comp.comp.count, - desc->suite.comp.decomp.count); - crypto_free_comp(comp); - } + acomp = crypto_alloc_acomp(driver, type, mask); + if (IS_ERR(acomp)) { + if (PTR_ERR(acomp) == -ENOENT) + return 0; + pr_err("alg: acomp: Failed to load transform for %s: %ld\n", + driver, PTR_ERR(acomp)); + return PTR_ERR(acomp); + } + err = test_acomp(acomp, desc->suite.comp.comp.vecs, + desc->suite.comp.decomp.vecs, + desc->suite.comp.comp.count, + desc->suite.comp.decomp.count); + crypto_free_acomp(acomp); return err; } @@ -4328,7 +4301,7 @@ static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs) if (vecs->public_key_vec) return 0; - sig_size = crypto_sig_keysize(tfm); + sig_size = crypto_sig_maxsize(tfm); if (sig_size < vecs->c_size) { pr_err("alg: sig: invalid maxsize %u\n", sig_size); return -EINVAL; @@ -4340,13 +4313,14 @@ static int test_sig_one(struct crypto_sig *tfm, const struct sig_testvec *vecs) /* Run asymmetric signature generation */ err = crypto_sig_sign(tfm, vecs->m, vecs->m_size, sig, sig_size); - if (err) { + if (err < 0) { pr_err("alg: sig: sign test failed: err %d\n", err); return err; } /* Verify that generated signature equals cooked signature */ - if (memcmp(sig, vecs->c, vecs->c_size) || + if (err != vecs->c_size || + memcmp(sig, vecs->c, vecs->c_size) || memchr_inv(sig + vecs->c_size, 0, sig_size - vecs->c_size)) { pr_err("alg: sig: sign test failed: invalid output\n"); hexdump(sig, sig_size); @@ -4505,6 +4479,12 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, .fips_allowed = 1, }, { + .alg = "authenc(hmac(sha256),cts(cbc(aes)))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(krb5_test_aes128_cts_hmac_sha256_128) + } + }, { .alg = "authenc(hmac(sha256),rfc3686(ctr(aes)))", .test = alg_test_null, .fips_allowed = 1, @@ -4525,6 +4505,12 @@ static const struct alg_test_desc alg_test_descs[] = { .test = alg_test_null, .fips_allowed = 1, }, { + .alg = "authenc(hmac(sha384),cts(cbc(aes)))", + .test = alg_test_aead, + .suite = { + .aead = __VECS(krb5_test_aes256_cts_hmac_sha384_192) + } + }, { .alg = "authenc(hmac(sha384),rfc3686(ctr(aes)))", .test = alg_test_null, .fips_allowed = 1, @@ -4743,9 +4729,6 @@ static const struct alg_test_desc alg_test_descs[] = { .hash = __VECS(sm4_cmac128_tv_template) } }, { - .alg = "compress_null", - .test = alg_test_null, - }, { .alg = "crc32", .test = alg_test_hash, .fips_allowed = 1, @@ -5384,6 +5367,10 @@ static const struct alg_test_desc alg_test_descs[] = { .fips_allowed = 1, .test = alg_test_null, }, { + .alg = "krb5enc(cmac(camellia),cts(cbc(camellia)))", + .test = alg_test_aead, + .suite.aead = __VECS(krb5_test_camellia_cts_cmac) + }, { .alg = "lrw(aes)", .generic_driver = "lrw(ecb(aes-generic))", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d3a99d15a3e5..afc10af59b0a 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -38591,4 +38591,355 @@ static const struct cipher_testvec aes_hctr2_tv_template[] = { }; +#ifdef __LITTLE_ENDIAN +#define 
AUTHENC_KEY_HEADER(enckeylen) \ + "\x08\x00\x01\x00" /* LE rtattr */ \ + enckeylen /* crypto_authenc_key_param */ +#else +#define AUTHENC_KEY_HEADER(enckeylen) \ + "\x00\x08\x00\x01" /* BE rtattr */ \ + enckeylen /* crypto_authenc_key_param */ +#endif + +static const struct aead_testvec krb5_test_aes128_cts_hmac_sha256_128[] = { + /* rfc8009 Appendix A */ + { + /* "enc no plain" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x7E\x58\x95\xEA\xF2\x67\x24\x35\xBA\xD8\x17\xF5\x45\xA3\x71\x48" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\xEF\x85\xFB\x89\x0B\xB8\x47\x2F\x4D\xAB\x20\x39\x4D\xCA\x78\x1D" + "\xAD\x87\x7E\xDA\x39\xD5\x0C\x87\x0C\x0D\x5A\x0A\x8E\x48\xC7\x18", + .clen = 16 + 0 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain<block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x7B\xCA\x28\x5E\x2F\xD4\x13\x0F\xB5\x5B\x1A\x5C\x83\xBC\x5B\x24" // Confounder + "\x00\x01\x02\x03\x04\x05", // Plain + .plen = 16 + 6, + .ctext = + "\x84\xD7\xF3\x07\x54\xED\x98\x7B\xAB\x0B\xF3\x50\x6B\xEB\x09\xCF" + "\xB5\x54\x02\xCE\xF7\xE6\x87\x7C\xE9\x9E\x24\x7E\x52\xD1\x6E\xD4" + "\x42\x1D\xFD\xF8\x97\x6C", + .clen = 16 + 6 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain==block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\x56\xAB\x21\x71\x3F\xF6\x2C\x0A\x14\x57\x20\x0F\x6F\xA9\x94\x8F" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain + .plen = 16 + 16, + .ctext = + "\x35\x17\xD6\x40\xF5\x0D\xDC\x8A\xD3\x62\x87\x22\xB3\x56\x9D\x2A" + "\xE0\x74\x93\xFA\x82\x63\x25\x40\x80\xEA\x65\xC1\x00\x8E\x8F\xC2" + "\x95\xFB\x48\x52\xE7\xD8\x3E\x1E\x7C\x48\xC3\x7E\xEB\xE6\xB0\xD3", + .clen = 16 + 16 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain>block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x9F\xDA\x0E\x56\xAB\x2D\x85\xE1\x56\x9A\x68\x86\x96\xC2\x6A\x6C" // Ki + "\x9B\x19\x7D\xD1\xE8\xC5\x60\x9D\x6E\x67\xC3\xE3\x7C\x62\xC7\x2E", // Ke + .klen = 4 + 4 + 16 + 16, + .ptext = + "\xA7\xA4\xE2\x9A\x47\x28\xCE\x10\x66\x4F\xB6\x4E\x49\xAD\x3F\xAC" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x10\x11\x12\x13\x14", // Plain + .plen = 16 + 21, + .ctext = + "\x72\x0F\x73\xB1\x8D\x98\x59\xCD\x6C\xCB\x43\x46\x11\x5C\xD3\x36" + "\xC7\x0F\x58\xED\xC0\xC4\x43\x7C\x55\x73\x54\x4C\x31\xC8\x13\xBC" + "\xE1\xE6\xD0\x72\xC1\x86\xB3\x9A\x41\x3C\x2F\x92\xCA\x9B\x83\x34" + "\xA2\x87\xFF\xCB\xFC", + .clen = 16 + 21 + 16, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, +}; + +static const struct aead_testvec krb5_test_aes256_cts_hmac_sha384_192[] = { + /* rfc8009 Appendix A */ + { + /* "enc no plain" */ + .key = + 
AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\xF7\x64\xE9\xFA\x15\xC2\x76\x47\x8B\x2C\x7D\x0C\x4E\x5F\x58\xE4" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\x41\xF5\x3F\xA5\xBF\xE7\x02\x6D\x91\xFA\xF9\xBE\x95\x91\x95\xA0" + "\x58\x70\x72\x73\xA9\x6A\x40\xF0\xA0\x19\x60\x62\x1A\xC6\x12\x74" + "\x8B\x9B\xBF\xBE\x7E\xB4\xCE\x3C", + .clen = 16 + 0 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain<block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\xB8\x0D\x32\x51\xC1\xF6\x47\x14\x94\x25\x6F\xFE\x71\x2D\x0B\x9A" // Confounder + "\x00\x01\x02\x03\x04\x05", // Plain + .plen = 16 + 6, + .ctext = + "\x4E\xD7\xB3\x7C\x2B\xCA\xC8\xF7\x4F\x23\xC1\xCF\x07\xE6\x2B\xC7" + "\xB7\x5F\xB3\xF6\x37\xB9\xF5\x59\xC7\xF6\x64\xF6\x9E\xAB\x7B\x60" + "\x92\x23\x75\x26\xEA\x0D\x1F\x61\xCB\x20\xD6\x9D\x10\xF2", + .clen = 16 + 6 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain==block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\x53\xBF\x8A\x0D\x10\x52\x65\xD4\xE2\x76\x42\x86\x24\xCE\x5E\x63" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F", // Plain + .plen = 16 + 16, + .ctext = + "\xBC\x47\xFF\xEC\x79\x98\xEB\x91\xE8\x11\x5C\xF8\xD1\x9D\xAC\x4B" + "\xBB\xE2\xE1\x63\xE8\x7D\xD3\x7F\x49\xBE\xCA\x92\x02\x77\x64\xF6" + "\x8C\xF5\x1F\x14\xD7\x98\xC2\x27\x3F\x35\xDF\x57\x4D\x1F\x93\x2E" + "\x40\xC4\xFF\x25\x5B\x36\xA2\x66", + .clen = 16 + 16 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, { + /* "enc plain>block" */ + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x69\xB1\x65\x14\xE3\xCD\x8E\x56\xB8\x20\x10\xD5\xC7\x30\x12\xB6" + "\x22\xC4\xD0\x0F\xFC\x23\xED\x1F" // Ki + "\x56\xAB\x22\xBE\xE6\x3D\x82\xD7\xBC\x52\x27\xF6\x77\x3F\x8E\xA7" + "\xA5\xEB\x1C\x82\x51\x60\xC3\x83\x12\x98\x0C\x44\x2E\x5C\x7E\x49", // Ke + .klen = 4 + 4 + 32 + 24, + .ptext = + "\x76\x3E\x65\x36\x7E\x86\x4F\x02\xF5\x51\x53\xC7\xE3\xB5\x8A\xF1" // Confounder + "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" + "\x10\x11\x12\x13\x14", // Plain + .plen = 16 + 21, + .ctext = + "\x40\x01\x3E\x2D\xF5\x8E\x87\x51\x95\x7D\x28\x78\xBC\xD2\xD6\xFE" + "\x10\x1C\xCF\xD5\x56\xCB\x1E\xAE\x79\xDB\x3C\x3E\xE8\x64\x29\xF2" + "\xB2\xA6\x02\xAC\x86\xFE\xF6\xEC\xB6\x47\xD6\x29\x5F\xAE\x07\x7A" + "\x1F\xEB\x51\x75\x08\xD2\xC1\x6B\x41\x92\xE0\x1F\x62", + .clen = 16 + 21 + 24, + .assoc = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // IV + .alen = 16, + }, +}; + +static 
const struct aead_testvec krb5_test_camellia_cts_cmac[] = { + /* rfc6803 sec 10 */ + { + // "enc no plain" + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x45\xeb\x66\xe2\xef\xa8\x77\x8f\x7d\xf1\x46\x54\x53\x05\x98\x06" // Ki + "\xe9\x9b\x82\xb3\x6c\x4a\xe8\xea\x19\xe9\x5d\xfa\x9e\xde\x88\x2c", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xB6\x98\x22\xA1\x9A\x6B\x09\xC0\xEB\xC8\x55\x7D\x1F\x1B\x6C\x0A" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\xC4\x66\xF1\x87\x10\x69\x92\x1E\xDB\x7C\x6F\xDE\x24\x4A\x52\xDB" + "\x0B\xA1\x0E\xDC\x19\x7B\xDB\x80\x06\x65\x8C\xA3\xCC\xCE\x6E\xB8", + .clen = 16 + 0 + 16, + }, { + // "enc 1 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x13\x5f\xe7\x11\x6f\x53\xc2\xaa\x36\x12\xb7\xea\xe0\xf2\x84\xaa" // Ki + "\xa7\xed\xcd\x53\x97\xea\x6d\x12\xb0\xaf\xf4\xcb\x8d\xaa\x57\xad", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\x6F\x2F\xC3\xC2\xA1\x66\xFD\x88\x98\x96\x7A\x83\xDE\x95\x96\xD9" // Confounder + "1", // Plain + .plen = 16 + 1, + .ctext = + "\x84\x2D\x21\xFD\x95\x03\x11\xC0\xDD\x46\x4A\x3F\x4B\xE8\xD6\xDA" + "\x88\xA5\x6D\x55\x9C\x9B\x47\xD3\xF9\xA8\x50\x67\xAF\x66\x15\x59" + "\xB8", + .clen = 16 + 1 + 16, + }, { + // "enc 9 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x10\x2c\x34\xd0\x75\x74\x9f\x77\x8a\x15\xca\xd1\xe9\x7d\xa9\x86" // Ki + "\xdd\xe4\x2e\xca\x7c\xd9\x86\x3f\xc3\xce\x89\xcb\xc9\x43\x62\xd7", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xA5\xB4\xA7\x1E\x07\x7A\xEE\xF9\x3C\x87\x63\xC1\x8F\xDB\x1F\x10" // Confounder + "9 bytesss", // Plain + .plen = 16 + 9, + .ctext = + "\x61\x9F\xF0\x72\xE3\x62\x86\xFF\x0A\x28\xDE\xB3\xA3\x52\xEC\x0D" + "\x0E\xDF\x5C\x51\x60\xD6\x63\xC9\x01\x75\x8C\xCF\x9D\x1E\xD3\x3D" + "\x71\xDB\x8F\x23\xAA\xBF\x83\x48\xA0", + .clen = 16 + 9 + 16, + }, { + // "enc 13 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\xb8\xc4\x38\xcc\x1a\x00\x60\xfc\x91\x3a\x8e\x07\x16\x96\xbd\x08" // Ki + "\xc3\x11\x3a\x25\x85\x90\xb9\xae\xbf\x72\x1b\x1a\xf6\xb0\xcb\xf8", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\x19\xFE\xE4\x0D\x81\x0C\x52\x4B\x5B\x22\xF0\x18\x74\xC6\x93\xDA" // Confounder + "13 bytes byte", // Plain + .plen = 16 + 13, + .ctext = + "\xB8\xEC\xA3\x16\x7A\xE6\x31\x55\x12\xE5\x9F\x98\xA7\xC5\x00\x20" + "\x5E\x5F\x63\xFF\x3B\xB3\x89\xAF\x1C\x41\xA2\x1D\x64\x0D\x86\x15" + "\xC9\xED\x3F\xBE\xB0\x5A\xB6\xAC\xB6\x76\x89\xB5\xEA", + .clen = 16 + 13 + 16, + }, { + // "enc 30 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x10") + "\x18\xaf\x19\xb0\x23\x74\x44\xfd\x75\x04\xad\x7d\xbd\x48\xad\xd3" // Ki + "\x8b\x07\xee\xd3\x01\x49\x91\x6a\xa2\x0d\xb3\xf5\xce\xd8\xaf\xad", // Ke + .klen = 4 + 4 + 16 * 2, + .ptext = + "\xCA\x7A\x7A\xB4\xBE\x19\x2D\xAB\xD6\x03\x50\x6D\xB1\x9C\x39\xE2" // Confounder + "30 bytes bytes bytes bytes byt", // Plain + .plen = 16 + 30, + .ctext = + "\xA2\x6A\x39\x05\xA4\xFF\xD5\x81\x6B\x7B\x1E\x27\x38\x0D\x08\x09" + "\x0C\x8E\xC1\xF3\x04\x49\x6E\x1A\xBD\xCD\x2B\xDC\xD1\xDF\xFC\x66" + "\x09\x89\xE1\x17\xA7\x13\xDD\xBB\x57\xA4\x14\x6C\x15\x87\xCB\xA4" + "\x35\x66\x65\x59\x1D\x22\x40\x28\x2F\x58\x42\xB1\x05\xA5", + .clen = 16 + 30 + 16, + }, { + // "enc no plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\xa2\xb8\x33\xe9\x43\xbb\x10\xee\x53\xb4\xa1\x9b\xc2\xbb\xc7\xe1" + "\x9b\x87\xad\x5d\xe9\x21\x22\xa4\x33\x8b\xe6\xf7\x32\xfd\x8a\x0e" // Ki + "\x6c\xcb\x3f\x25\xd8\xae\x57\xf4\xe8\xf6\xca\x47\x4b\xdd\xef\xf1" + "\x16\xce\x13\x1b\x3f\x71\x01\x2e\x75\x6d\x6b\x1e\x3f\x70\xa7\xf1", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + 
"\x3C\xBB\xD2\xB4\x59\x17\x94\x10\x67\xF9\x65\x99\xBB\x98\x92\x6C" // Confounder + "", // Plain + .plen = 16 + 0, + .ctext = + "\x03\x88\x6D\x03\x31\x0B\x47\xA6\xD8\xF0\x6D\x7B\x94\xD1\xDD\x83" + "\x7E\xCC\xE3\x15\xEF\x65\x2A\xFF\x62\x08\x59\xD9\x4A\x25\x92\x66", + .clen = 16 + 0 + 16, + }, { + // "enc 1 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x84\x61\x4b\xfa\x98\xf1\x74\x8a\xa4\xaf\x99\x2b\x8c\x26\x28\x0d" + "\xc8\x98\x73\x29\xdf\x77\x5c\x1d\xb0\x4a\x43\xf1\x21\xaa\x86\x65" // Ki + "\xe9\x31\x73\xaa\x01\xeb\x3c\x24\x62\x31\xda\xfc\x78\x02\xee\x32" + "\xaf\x24\x85\x1d\x8c\x73\x87\xd1\x8c\xb9\xb2\xc5\xb7\xf5\x70\xb8", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xDE\xF4\x87\xFC\xEB\xE6\xDE\x63\x46\xD4\xDA\x45\x21\xBB\xA2\xD2" // Confounder + "1", // Plain + .plen = 16 + 1, + .ctext = + "\x2C\x9C\x15\x70\x13\x3C\x99\xBF\x6A\x34\xBC\x1B\x02\x12\x00\x2F" + "\xD1\x94\x33\x87\x49\xDB\x41\x35\x49\x7A\x34\x7C\xFC\xD9\xD1\x8A" + "\x12", + .clen = 16 + 1 + 16, + }, { + // "enc 9 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x47\xb9\xf5\xba\xd7\x63\x00\x58\x2a\x54\x45\xfa\x0c\x1b\x29\xc3" + "\xaa\x83\xec\x63\xb9\x0b\x4a\xb0\x08\x48\xc1\x85\x67\x4f\x44\xa7" // Ki + "\xcd\xa2\xd3\x9a\x9b\x24\x3f\xfe\xb5\x6e\x8d\x5f\x4b\xd5\x28\x74" + "\x1e\xcb\x52\x0c\x62\x12\x3f\xb0\x40\xb8\x41\x8b\x15\xc7\xd7\x0c", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xAD\x4F\xF9\x04\xD3\x4E\x55\x53\x84\xB1\x41\x00\xFC\x46\x5F\x88" // Confounder + "9 bytesss", // Plain + .plen = 16 + 9, + .ctext = + "\x9C\x6D\xE7\x5F\x81\x2D\xE7\xED\x0D\x28\xB2\x96\x35\x57\xA1\x15" + "\x64\x09\x98\x27\x5B\x0A\xF5\x15\x27\x09\x91\x3F\xF5\x2A\x2A\x9C" + "\x8E\x63\xB8\x72\xF9\x2E\x64\xC8\x39", + .clen = 16 + 9 + 16, + }, { + // "enc 13 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x15\x2f\x8c\x9d\xc9\x85\x79\x6e\xb1\x94\xed\x14\xc5\x9e\xac\xdd" + "\x41\x8a\x33\x32\x36\xb7\x8f\xaf\xa7\xc7\x9b\x04\xe0\xac\xe7\xbf" // Ki + "\xcd\x8a\x10\xe2\x79\xda\xdd\xb6\x90\x1e\xc3\x0b\xdf\x98\x73\x25" + "\x0f\x6e\xfc\x6a\x77\x36\x7d\x74\xdc\x3e\xe7\xf7\x4b\xc7\x77\x4e", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\xCF\x9B\xCA\x6D\xF1\x14\x4E\x0C\x0A\xF9\xB8\xF3\x4C\x90\xD5\x14" // Confounder + "13 bytes byte", + .plen = 16 + 13, + .ctext = + "\xEE\xEC\x85\xA9\x81\x3C\xDC\x53\x67\x72\xAB\x9B\x42\xDE\xFC\x57" + "\x06\xF7\x26\xE9\x75\xDD\xE0\x5A\x87\xEB\x54\x06\xEA\x32\x4C\xA1" + "\x85\xC9\x98\x6B\x42\xAA\xBE\x79\x4B\x84\x82\x1B\xEE", + .clen = 16 + 13 + 16, + }, { + // "enc 30 plain", + .key = + AUTHENC_KEY_HEADER("\x00\x00\x00\x20") + "\x04\x8d\xeb\xf7\xb1\x2c\x09\x32\xe8\xb2\x96\x99\x6c\x23\xf8\xb7" + "\x9d\x59\xb9\x7e\xa1\x19\xfc\x0c\x15\x6b\xf7\x88\xdc\x8c\x85\xe8" // Ki + "\x1d\x51\x47\xf3\x4b\xb0\x01\xa0\x4a\x68\xa7\x13\x46\xe7\x65\x4e" + "\x02\x23\xa6\x0d\x90\xbc\x2b\x79\xb4\xd8\x79\x56\xd4\x7c\xd4\x2a", // Ke + .klen = 4 + 4 + 32 * 2, + .ptext = + "\x64\x4D\xEF\x38\xDA\x35\x00\x72\x75\x87\x8D\x21\x68\x55\xE2\x28" // Confounder + "30 bytes bytes bytes bytes byt", // Plain + .plen = 16 + 30, + .ctext = + "\x0E\x44\x68\x09\x85\x85\x5F\x2D\x1F\x18\x12\x52\x9C\xA8\x3B\xFD" + "\x8E\x34\x9D\xE6\xFD\x9A\xDA\x0B\xAA\xA0\x48\xD6\x8E\x26\x5F\xEB" + "\xF3\x4A\xD1\x25\x5A\x34\x49\x99\xAD\x37\x14\x68\x87\xA6\xC6\x84" + "\x57\x31\xAC\x7F\x46\x37\x6A\x05\x04\xCD\x06\x57\x14\x74", + .clen = 16 + 30 + 16, + }, +}; + #endif /* _CRYPTO_TESTMGR_H */ diff --git a/crypto/xctr.c b/crypto/xctr.c index 6ed9c85ededa..9c536ab6d2e5 100644 --- a/crypto/xctr.c +++ b/crypto/xctr.c @@ -78,7 +78,7 @@ static int 
crypto_xctr_crypt_inplace(struct skcipher_walk *walk, crypto_cipher_alg(tfm)->cia_encrypt; unsigned long alignmask = crypto_cipher_alignmask(tfm); unsigned int nbytes = walk->nbytes; - u8 *data = walk->src.virt.addr; + u8 *data = walk->dst.virt.addr; u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1); __le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1); diff --git a/crypto/xts.c b/crypto/xts.c index 821060ede2cf..31529c9ef08f 100644 --- a/crypto/xts.c +++ b/crypto/xts.c @@ -99,7 +99,7 @@ static int xts_xor_tweak(struct skcipher_request *req, bool second_pass, while (w.nbytes) { unsigned int avail = w.nbytes; - le128 *wsrc; + const le128 *wsrc; le128 *wdst; wsrc = w.src.virt.addr; diff --git a/crypto/zstd.c b/crypto/zstd.c index 154a969c83a8..90bb4f36f846 100644 --- a/crypto/zstd.c +++ b/crypto/zstd.c @@ -103,7 +103,7 @@ static int __zstd_init(void *ctx) return ret; } -static void *zstd_alloc_ctx(struct crypto_scomp *tfm) +static void *zstd_alloc_ctx(void) { int ret; struct zstd_ctx *ctx; @@ -121,32 +121,18 @@ static void *zstd_alloc_ctx(struct crypto_scomp *tfm) return ctx; } -static int zstd_init(struct crypto_tfm *tfm) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - return __zstd_init(ctx); -} - static void __zstd_exit(void *ctx) { zstd_comp_exit(ctx); zstd_decomp_exit(ctx); } -static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx) +static void zstd_free_ctx(void *ctx) { __zstd_exit(ctx); kfree_sensitive(ctx); } -static void zstd_exit(struct crypto_tfm *tfm) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - __zstd_exit(ctx); -} - static int __zstd_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { @@ -161,14 +147,6 @@ static int __zstd_compress(const u8 *src, unsigned int slen, return 0; } -static int zstd_compress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - return __zstd_compress(src, slen, dst, dlen, ctx); -} - static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -189,14 +167,6 @@ static int __zstd_decompress(const u8 *src, unsigned int slen, return 0; } -static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen) -{ - struct zstd_ctx *ctx = crypto_tfm_ctx(tfm); - - return __zstd_decompress(src, slen, dst, dlen, ctx); -} - static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) @@ -204,19 +174,6 @@ static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src, return __zstd_decompress(src, slen, dst, dlen, ctx); } -static struct crypto_alg alg = { - .cra_name = "zstd", - .cra_driver_name = "zstd-generic", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct zstd_ctx), - .cra_module = THIS_MODULE, - .cra_init = zstd_init, - .cra_exit = zstd_exit, - .cra_u = { .compress = { - .coa_compress = zstd_compress, - .coa_decompress = zstd_decompress } } -}; - static struct scomp_alg scomp = { .alloc_ctx = zstd_alloc_ctx, .free_ctx = zstd_free_ctx, @@ -231,22 +188,11 @@ static struct scomp_alg scomp = { static int __init zstd_mod_init(void) { - int ret; - - ret = crypto_register_alg(&alg); - if (ret) - return ret; - - ret = crypto_register_scomp(&scomp); - if (ret) - crypto_unregister_alg(&alg); - - return ret; + return crypto_register_scomp(&scomp); } static void __exit 
zstd_mod_fini(void) { - crypto_unregister_alg(&alg); crypto_unregister_scomp(&scomp); } diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 17854f052386..c85827843447 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -534,10 +534,10 @@ config HW_RANDOM_NPCM If unsure, say Y. config HW_RANDOM_KEYSTONE + tristate "TI Keystone NETCP SA Hardware random number generator" depends on ARCH_KEYSTONE || COMPILE_TEST depends on HAS_IOMEM && OF default HW_RANDOM - tristate "TI Keystone NETCP SA Hardware random number generator" help This option enables Keystone's hardware random generator. @@ -579,15 +579,15 @@ config HW_RANDOM_ARM_SMCCC_TRNG module will be called arm_smccc_trng. config HW_RANDOM_CN10K - tristate "Marvell CN10K Random Number Generator support" - depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) - default HW_RANDOM if ARCH_THUNDER - help - This driver provides support for the True Random Number - generator available in Marvell CN10K SoCs. + tristate "Marvell CN10K Random Number Generator support" + depends on HW_RANDOM && PCI && (ARM64 || (64BIT && COMPILE_TEST)) + default HW_RANDOM if ARCH_THUNDER + help + This driver provides support for the True Random Number + generator available in Marvell CN10K SoCs. - To compile this driver as a module, choose M here. - The module will be called cn10k_rng. If unsure, say Y. + To compile this driver as a module, choose M here. + The module will be called cn10k_rng. If unsure, say Y. config HW_RANDOM_JH7110 tristate "StarFive JH7110 Random Number Generator support" @@ -606,7 +606,8 @@ config HW_RANDOM_ROCKCHIP default HW_RANDOM help This driver provides kernel-side support for the True Random Number - Generator hardware found on some Rockchip SoC like RK3566 or RK3568. + Generator hardware found on some Rockchip SoCs like RK3566, RK3568 + or RK3588. To compile this driver as a module, choose M here: the module will be called rockchip-rng. diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c index 118a72acb99b..241664a9b5d9 100644 --- a/drivers/char/hw_random/imx-rngc.c +++ b/drivers/char/hw_random/imx-rngc.c @@ -13,6 +13,8 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include <linux/interrupt.h> #include <linux/hw_random.h> #include <linux/completion.h> @@ -53,6 +55,7 @@ #define RNGC_SELFTEST_TIMEOUT 2500 /* us */ #define RNGC_SEED_TIMEOUT 200 /* ms */ +#define RNGC_PM_TIMEOUT 500 /* ms */ static bool self_test = true; module_param(self_test, bool, 0); @@ -123,7 +126,11 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); unsigned int status; - int retval = 0; + int err, retval = 0; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; while (max >= sizeof(u32)) { status = readl(rngc->base + RNGC_STATUS); @@ -141,6 +148,8 @@ static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait) max -= sizeof(u32); } } + pm_runtime_mark_last_busy(rngc->dev); + pm_runtime_put(rngc->dev); return retval ? 
retval : -EIO; } @@ -169,7 +178,11 @@ static int imx_rngc_init(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); u32 cmd, ctrl; - int ret; + int ret, err; + + err = pm_runtime_resume_and_get(rngc->dev); + if (err) + return err; /* clear error */ cmd = readl(rngc->base + RNGC_COMMAND); @@ -186,15 +199,15 @@ static int imx_rngc_init(struct hwrng *rng) ret = wait_for_completion_timeout(&rngc->rng_op_done, msecs_to_jiffies(RNGC_SEED_TIMEOUT)); if (!ret) { - ret = -ETIMEDOUT; - goto err; + err = -ETIMEDOUT; + goto out; } } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR); if (rngc->err_reg) { - ret = -EIO; - goto err; + err = -EIO; + goto out; } /* @@ -205,23 +218,29 @@ static int imx_rngc_init(struct hwrng *rng) ctrl |= RNGC_CTRL_AUTO_SEED; writel(ctrl, rngc->base + RNGC_CONTROL); +out: /* * if initialisation was successful, we keep the interrupt * unmasked until imx_rngc_cleanup is called * we mask the interrupt ourselves if we return an error */ - return 0; + if (err) + imx_rngc_irq_mask_clear(rngc); -err: - imx_rngc_irq_mask_clear(rngc); - return ret; + pm_runtime_put(rngc->dev); + return err; } static void imx_rngc_cleanup(struct hwrng *rng) { struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng); + int err; - imx_rngc_irq_mask_clear(rngc); + err = pm_runtime_resume_and_get(rngc->dev); + if (!err) { + imx_rngc_irq_mask_clear(rngc); + pm_runtime_put(rngc->dev); + } } static int __init imx_rngc_probe(struct platform_device *pdev) @@ -240,7 +259,7 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (IS_ERR(rngc->base)) return PTR_ERR(rngc->base); - rngc->clk = devm_clk_get_enabled(&pdev->dev, NULL); + rngc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rngc->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(rngc->clk), "Cannot get rng_clk\n"); @@ -248,14 +267,18 @@ static int __init imx_rngc_probe(struct platform_device *pdev) if (irq < 0) return irq; + clk_prepare_enable(rngc->clk); + ver_id = readl(rngc->base + RNGC_VER_ID); rng_type = FIELD_GET(RNG_TYPE, ver_id); /* * This driver supports only RNGC and RNGB. (There's a different * driver for RNGA.) 
*/ - if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) + if (rng_type != RNGC_TYPE_RNGC && rng_type != RNGC_TYPE_RNGB) { + clk_disable_unprepare(rngc->clk); return -ENODEV; + } init_completion(&rngc->rng_op_done); @@ -272,15 +295,24 @@ static int __init imx_rngc_probe(struct platform_device *pdev) ret = devm_request_irq(&pdev->dev, irq, imx_rngc_irq, 0, pdev->name, (void *)rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "Can't get interrupt working.\n"); + } if (self_test) { ret = imx_rngc_self_test(rngc); - if (ret) + if (ret) { + clk_disable_unprepare(rngc->clk); return dev_err_probe(&pdev->dev, ret, "self test failed\n"); + } } + pm_runtime_set_autosuspend_delay(&pdev->dev, RNGC_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + devm_pm_runtime_enable(&pdev->dev); + ret = devm_hwrng_register(&pdev->dev, &rngc->rng); if (ret) return dev_err_probe(&pdev->dev, ret, "hwrng registration failed\n"); @@ -310,7 +342,10 @@ static int imx_rngc_resume(struct device *dev) return 0; } -static DEFINE_SIMPLE_DEV_PM_OPS(imx_rngc_pm_ops, imx_rngc_suspend, imx_rngc_resume); +static const struct dev_pm_ops imx_rngc_pm_ops = { + SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + RUNTIME_PM_OPS(imx_rngc_suspend, imx_rngc_resume, NULL) +}; static const struct of_device_id imx_rngc_dt_ids[] = { { .compatible = "fsl,imx25-rngb" }, @@ -321,7 +356,7 @@ MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids); static struct platform_driver imx_rngc_driver = { .driver = { .name = KBUILD_MODNAME, - .pm = pm_sleep_ptr(&imx_rngc_pm_ops), + .pm = pm_ptr(&imx_rngc_pm_ops), .of_match_table = imx_rngc_dt_ids, }, }; diff --git a/drivers/char/hw_random/rockchip-rng.c b/drivers/char/hw_random/rockchip-rng.c index 289b385bbf05..161050591663 100644 --- a/drivers/char/hw_random/rockchip-rng.c +++ b/drivers/char/hw_random/rockchip-rng.c @@ -1,12 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* - * rockchip-rng.c True Random Number Generator driver for Rockchip RK3568 SoC + * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs * * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd. * Copyright (c) 2022, Aurelien Jarno + * Copyright (c) 2025, Collabora Ltd. 
* Authors: * Lin Jinhan <troy.lin@rock-chips.com> * Aurelien Jarno <aurelien@aurel32.net> + * Nicolas Frattaroli <nicolas.frattaroli@collabora.com> */ #include <linux/clk.h> #include <linux/hw_random.h> @@ -32,6 +34,9 @@ */ #define RK_RNG_SAMPLE_CNT 1000 +/* after how many bytes of output TRNGv1 implementations should be reseeded */ +#define RK_TRNG_V1_AUTO_RESEED_CNT 16000 + /* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */ #define TRNG_RST_CTL 0x0004 #define TRNG_RNG_CTL 0x0400 @@ -49,11 +54,64 @@ #define TRNG_RNG_SAMPLE_CNT 0x0404 #define TRNG_RNG_DOUT 0x0410 +/* + * TRNG V1 register definitions + * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP) + * and can be found in the Rockchip RK3588 SoC + */ +#define TRNG_V1_CTRL 0x0000 +#define TRNG_V1_CTRL_NOP 0x00 +#define TRNG_V1_CTRL_RAND 0x01 +#define TRNG_V1_CTRL_SEED 0x02 + +#define TRNG_V1_STAT 0x0004 +#define TRNG_V1_STAT_SEEDED BIT(9) +#define TRNG_V1_STAT_GENERATING BIT(30) +#define TRNG_V1_STAT_RESEEDING BIT(31) + +#define TRNG_V1_MODE 0x0008 +#define TRNG_V1_MODE_128_BIT (0x00 << 3) +#define TRNG_V1_MODE_256_BIT (0x01 << 3) + +/* Interrupt Enable register; unused because polling is faster */ +#define TRNG_V1_IE 0x0010 +#define TRNG_V1_IE_GLBL_EN BIT(31) +#define TRNG_V1_IE_SEED_DONE_EN BIT(1) +#define TRNG_V1_IE_RAND_RDY_EN BIT(0) + +#define TRNG_V1_ISTAT 0x0014 +#define TRNG_V1_ISTAT_RAND_RDY BIT(0) + +/* RAND0 ~ RAND7 */ +#define TRNG_V1_RAND0 0x0020 +#define TRNG_V1_RAND7 0x003C + +/* Auto Reseed Register */ +#define TRNG_V1_AUTO_RQSTS 0x0060 + +#define TRNG_V1_VERSION 0x00F0 +#define TRNG_v1_VERSION_CODE 0x46bc +/* end of TRNG_V1 register definitions */ + +/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */ +static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0), + "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats."); + struct rk_rng { struct hwrng rng; void __iomem *base; int clk_num; struct clk_bulk_data *clk_bulks; + const struct rk_rng_soc_data *soc_data; + struct device *dev; +}; + +struct rk_rng_soc_data { + int (*rk_rng_init)(struct hwrng *rng); + int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait); + void (*rk_rng_cleanup)(struct hwrng *rng); + unsigned short quality; + bool reset_optional; }; /* The mask in the upper 16 bits determines the bits that are updated */ @@ -62,19 +120,38 @@ static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask) writel((mask << 16) | val, rng->base + TRNG_RNG_CTL); } -static int rk_rng_init(struct hwrng *rng) +static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset) { - struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); - int ret; + writel(val, rng->base + offset); +} +static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset) +{ + return readl(rng->base + offset); +} + +static int rk_rng_enable_clks(struct rk_rng *rk_rng) +{ + int ret; /* start clocks */ ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks); if (ret < 0) { - dev_err((struct device *) rk_rng->rng.priv, - "Failed to enable clks %d\n", ret); + dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret); return ret; } + return 0; +} + +static int rk3568_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + /* set the sample period */ writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT); @@ -87,7 +164,7 @@ static int rk_rng_init(struct hwrng 
*rng) return 0; } -static void rk_rng_cleanup(struct hwrng *rng) +static void rk3568_rng_cleanup(struct hwrng *rng) { struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); @@ -98,14 +175,14 @@ static void rk_rng_cleanup(struct hwrng *rng) clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); } -static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); u32 reg; int ret = 0; - ret = pm_runtime_resume_and_get((struct device *) rk_rng->rng.priv); + ret = pm_runtime_resume_and_get(rk_rng->dev); if (ret < 0) return ret; @@ -122,12 +199,120 @@ static int rk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) /* Read random data stored in the registers */ memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read); out: - pm_runtime_mark_last_busy((struct device *) rk_rng->rng.priv); - pm_runtime_put_sync_autosuspend((struct device *) rk_rng->rng.priv); + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); + + return (ret < 0) ? ret : to_read; +} + +static int rk3588_rng_init(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + u32 version, status, mask, istat; + int ret; + + ret = rk_rng_enable_clks(rk_rng); + if (ret < 0) + return ret; + + version = rk_rng_readl(rk_rng, TRNG_V1_VERSION); + if (version != TRNG_v1_VERSION_CODE) { + dev_err(rk_rng->dev, + "wrong trng version, expected = %08x, actual = %08x\n", + TRNG_v1_VERSION_CODE, version); + ret = -EFAULT; + goto err_disable_clk; + } + + mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING | + TRNG_V1_STAT_RESEEDING; + if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status, + (status & mask) == TRNG_V1_STAT_SEEDED, + RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) { + dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n"); + ret = -ETIMEDOUT; + goto err_disable_clk; + } + + /* + * clear ISTAT flag, downstream advises to do this to avoid + * auto-reseeding "on power on" + */ + istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT); + + /* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */ + rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS); + + return 0; +err_disable_clk: + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); + return ret; +} + +static void rk3588_rng_cleanup(struct hwrng *rng) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + + clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks); +} + +static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng); + size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE); + int ret = 0; + u32 reg; + + ret = pm_runtime_resume_and_get(rk_rng->dev); + if (ret < 0) + return ret; + + /* Clear ISTAT, even without interrupts enabled, this will be updated */ + reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT); + rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + + /* generate 256 bits of random data */ + rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE); + rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL); + + ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg, + (reg & TRNG_V1_ISTAT_RAND_RDY), 0, + RK_RNG_POLL_TIMEOUT_US); + if (ret < 0) + goto out; + + /* Read random data that's 
in registers TRNG_V1_RAND0 through RAND7 */ + memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read); + +out: + /* Clear ISTAT */ + rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT); + /* close the TRNG */ + rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL); + + pm_runtime_mark_last_busy(rk_rng->dev); + pm_runtime_put_sync_autosuspend(rk_rng->dev); return (ret < 0) ? ret : to_read; } +static const struct rk_rng_soc_data rk3568_soc_data = { + .rk_rng_init = rk3568_rng_init, + .rk_rng_read = rk3568_rng_read, + .rk_rng_cleanup = rk3568_rng_cleanup, + .quality = 900, + .reset_optional = false, +}; + +static const struct rk_rng_soc_data rk3588_soc_data = { + .rk_rng_init = rk3588_rng_init, + .rk_rng_read = rk3588_rng_read, + .rk_rng_cleanup = rk3588_rng_cleanup, + .quality = 999, /* as determined by actual testing */ + .reset_optional = true, +}; + static int rk_rng_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -139,6 +324,7 @@ static int rk_rng_probe(struct platform_device *pdev) if (!rk_rng) return -ENOMEM; + rk_rng->soc_data = of_device_get_match_data(dev); rk_rng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rk_rng->base)) return PTR_ERR(rk_rng->base); @@ -148,34 +334,40 @@ static int rk_rng_probe(struct platform_device *pdev) return dev_err_probe(dev, rk_rng->clk_num, "Failed to get clks property\n"); - rst = devm_reset_control_array_get_exclusive(&pdev->dev); - if (IS_ERR(rst)) - return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); + if (rk_rng->soc_data->reset_optional) + rst = devm_reset_control_array_get_optional_exclusive(dev); + else + rst = devm_reset_control_array_get_exclusive(dev); - reset_control_assert(rst); - udelay(2); - reset_control_deassert(rst); + if (rst) { + if (IS_ERR(rst)) + return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n"); + + reset_control_assert(rst); + udelay(2); + reset_control_deassert(rst); + } platform_set_drvdata(pdev, rk_rng); rk_rng->rng.name = dev_driver_string(dev); if (!IS_ENABLED(CONFIG_PM)) { - rk_rng->rng.init = rk_rng_init; - rk_rng->rng.cleanup = rk_rng_cleanup; + rk_rng->rng.init = rk_rng->soc_data->rk_rng_init; + rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup; } - rk_rng->rng.read = rk_rng_read; - rk_rng->rng.priv = (unsigned long) dev; - rk_rng->rng.quality = 900; + rk_rng->rng.read = rk_rng->soc_data->rk_rng_read; + rk_rng->dev = dev; + rk_rng->rng.quality = rk_rng->soc_data->quality; pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); ret = devm_pm_runtime_enable(dev); if (ret) - return dev_err_probe(&pdev->dev, ret, "Runtime pm activation failed.\n"); + return dev_err_probe(dev, ret, "Runtime pm activation failed.\n"); ret = devm_hwrng_register(dev, &rk_rng->rng); if (ret) - return dev_err_probe(&pdev->dev, ret, "Failed to register Rockchip hwrng\n"); + return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n"); return 0; } @@ -184,7 +376,7 @@ static int __maybe_unused rk_rng_runtime_suspend(struct device *dev) { struct rk_rng *rk_rng = dev_get_drvdata(dev); - rk_rng_cleanup(&rk_rng->rng); + rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng); return 0; } @@ -193,7 +385,7 @@ static int __maybe_unused rk_rng_runtime_resume(struct device *dev) { struct rk_rng *rk_rng = dev_get_drvdata(dev); - return rk_rng_init(&rk_rng->rng); + return rk_rng->soc_data->rk_rng_init(&rk_rng->rng); } static const struct dev_pm_ops rk_rng_pm_ops = { @@ -204,7 +396,8 @@ static const struct dev_pm_ops rk_rng_pm_ops = { }; 
static const struct of_device_id rk_rng_dt_match[] = { - { .compatible = "rockchip,rk3568-rng", }, + { .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data }, + { .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data }, { /* sentinel */ }, }; @@ -221,8 +414,9 @@ static struct platform_driver rk_rng_driver = { module_platform_driver(rk_rng_driver); -MODULE_DESCRIPTION("Rockchip RK3568 True Random Number Generator driver"); +MODULE_DESCRIPTION("Rockchip True Random Number Generator driver"); MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>"); MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>"); MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>"); +MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 19ab145f912e..47082782008a 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -855,5 +855,6 @@ config CRYPTO_DEV_SA2UL source "drivers/crypto/aspeed/Kconfig" source "drivers/crypto/starfive/Kconfig" +source "drivers/crypto/inside-secure/eip93/Kconfig" endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index fef18ffdb128..c97f0ebc55ec 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -43,7 +43,7 @@ obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/ obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/ #obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/ -obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/ +obj-y += inside-secure/ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/ obj-y += xilinx/ obj-y += hisilicon/ diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 1c1f57baef0e..500b08e42282 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2897,13 +2897,13 @@ static int artpec6_crypto_probe(struct platform_device *pdev) tasklet_init(&ac->task, artpec6_crypto_task, (unsigned long)ac); - ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, + ac->pad_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX, GFP_KERNEL); if (!ac->pad_buffer) return -ENOMEM; ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX); - ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, + ac->zero_buffer = devm_kcalloc(&pdev->dev, 2, ARTPEC_CACHE_LINE_MAX, GFP_KERNEL); if (!ac->zero_buffer) return -ENOMEM; diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index 9e6798efbfb7..6b80d033648e 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -15,6 +15,7 @@ #include <linux/kthread.h> #include <linux/rtnetlink.h> #include <linux/sched.h> +#include <linux/string_choices.h> #include <linux/of.h> #include <linux/io.h> #include <linux/bitops.h> @@ -140,8 +141,8 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg, struct iproc_ctx_s *ctx = rctx->ctx; u32 datalen; /* Number of bytes of response data expected */ - mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; @@ -204,8 +205,8 @@ spu_skcipher_tx_sg_create(struct brcm_message *mssg, u32 datalen; /* Number of bytes of response data expected */ u32 stat_len; - mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if 
(unlikely(!mssg->spu.src)) return -ENOMEM; @@ -531,8 +532,8 @@ spu_ahash_rx_sg_create(struct brcm_message *mssg, struct scatterlist *sg; /* used to build sgs in mbox message */ struct iproc_ctx_s *ctx = rctx->ctx; - mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; @@ -586,8 +587,8 @@ spu_ahash_tx_sg_create(struct brcm_message *mssg, u32 datalen; /* Number of bytes of response data expected */ u32 stat_len; - mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.src) return -ENOMEM; @@ -1076,8 +1077,8 @@ static int spu_aead_rx_sg_create(struct brcm_message *mssg, /* have to catch gcm pad in separate buffer */ rx_frag_num++; - mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.dst = kmalloc_array(rx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; @@ -1178,8 +1179,8 @@ static int spu_aead_tx_sg_create(struct brcm_message *mssg, u32 assoc_offset = 0; u32 stat_len; - mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), - rctx->gfp); + mssg->spu.src = kmalloc_array(tx_frag_num, sizeof(struct scatterlist), + rctx->gfp); if (!mssg->spu.src) return -ENOMEM; @@ -2687,7 +2688,7 @@ static int aead_enqueue(struct aead_request *req, bool is_encrypt) flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len); flow_dump(" iv: ", req->iv, rctx->iv_ctr_len); flow_log(" authkeylen:%u\n", ctx->authkeylen); - flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no"); + flow_log(" is_esp: %s\n", str_yes_no(ctx->is_esp)); if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) flow_log(" max_payload infinite"); diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index 3fdc64b5a65e..ce322cf1baa5 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/string.h> +#include <linux/string_choices.h> #include "util.h" #include "spu.h" @@ -999,7 +1000,7 @@ u32 spu2_create_request(u8 *spu_hdr, req_opts->is_inbound, req_opts->auth_first); flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg, cipher_parms->mode, cipher_parms->type); - flow_log(" is_esp: %s\n", req_opts->is_esp ? "yes" : "no"); + flow_log(" is_esp: %s\n", str_yes_no(req_opts->is_esp)); flow_log(" key: %d\n", cipher_parms->key_len); flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len); flow_log(" iv: %d\n", cipher_parms->iv_len); diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index e809d030ab11..107ccb2ade42 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -19,6 +19,7 @@ #include <linux/dma-mapping.h> #include <linux/fsl/mc.h> #include <linux/kernel.h> +#include <linux/string_choices.h> #include <soc/fsl/dpaa2-io.h> #include <soc/fsl/dpaa2-fd.h> #include <crypto/xts.h> @@ -5175,7 +5176,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) return err; } - dev_dbg(dev, "disable: %s\n", enabled ? 
"false" : "true"); + dev_dbg(dev, "disable: %s\n", str_false_true(enabled)); for (i = 0; i < priv->num_pairs; i++) { ppriv = per_cpu_ptr(priv->ppriv, i); diff --git a/drivers/crypto/cavium/zip/zip_crypto.c b/drivers/crypto/cavium/zip/zip_crypto.c index 1046a746d36f..02e87f2d50db 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.c +++ b/drivers/crypto/cavium/zip/zip_crypto.c @@ -195,48 +195,8 @@ static int zip_decompress(const u8 *src, unsigned int slen, return ret; } -/* Legacy Compress framework start */ -int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_ctx_init(zip_ctx, 0); -} - -int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_ctx_init(zip_ctx, 1); -} - -void zip_free_comp_ctx(struct crypto_tfm *tfm) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - zip_ctx_exit(zip_ctx); -} - -int zip_comp_compress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_compress(src, slen, dst, dlen, zip_ctx); -} - -int zip_comp_decompress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) -{ - struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); - - return zip_decompress(src, slen, dst, dlen, zip_ctx); -} /* Legacy compress framework end */ - /* SCOMP framework start */ -void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm) +void *zip_alloc_scomp_ctx_deflate(void) { int ret; struct zip_kernel_ctx *zip_ctx; @@ -255,7 +215,7 @@ void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm) return zip_ctx; } -void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm) +void *zip_alloc_scomp_ctx_lzs(void) { int ret; struct zip_kernel_ctx *zip_ctx; @@ -274,7 +234,7 @@ void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm) return zip_ctx; } -void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx) +void zip_free_scomp_ctx(void *ctx) { struct zip_kernel_ctx *zip_ctx = ctx; diff --git a/drivers/crypto/cavium/zip/zip_crypto.h b/drivers/crypto/cavium/zip/zip_crypto.h index b59ddfcacd34..10899ece2d1f 100644 --- a/drivers/crypto/cavium/zip/zip_crypto.h +++ b/drivers/crypto/cavium/zip/zip_crypto.h @@ -46,7 +46,6 @@ #ifndef __ZIP_CRYPTO_H__ #define __ZIP_CRYPTO_H__ -#include <linux/crypto.h> #include <crypto/internal/scompress.h> #include "common.h" #include "zip_deflate.h" @@ -57,19 +56,9 @@ struct zip_kernel_ctx { struct zip_operation zip_decomp; }; -int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm); -int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm); -void zip_free_comp_ctx(struct crypto_tfm *tfm); -int zip_comp_compress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); -int zip_comp_decompress(struct crypto_tfm *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); - -void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm); -void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm); -void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *zip_ctx); +void *zip_alloc_scomp_ctx_deflate(void); +void *zip_alloc_scomp_ctx_lzs(void); +void zip_free_scomp_ctx(void *zip_ctx); int zip_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c index dc5b7bf7e1fd..abd58de4343d 100644 --- 
a/drivers/crypto/cavium/zip/zip_main.c +++ b/drivers/crypto/cavium/zip/zip_main.c @@ -371,36 +371,6 @@ static struct pci_driver zip_driver = { /* Kernel Crypto Subsystem Interface */ -static struct crypto_alg zip_comp_deflate = { - .cra_name = "deflate", - .cra_driver_name = "deflate-cavium", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct zip_kernel_ctx), - .cra_priority = 300, - .cra_module = THIS_MODULE, - .cra_init = zip_alloc_comp_ctx_deflate, - .cra_exit = zip_free_comp_ctx, - .cra_u = { .compress = { - .coa_compress = zip_comp_compress, - .coa_decompress = zip_comp_decompress - } } -}; - -static struct crypto_alg zip_comp_lzs = { - .cra_name = "lzs", - .cra_driver_name = "lzs-cavium", - .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, - .cra_ctxsize = sizeof(struct zip_kernel_ctx), - .cra_priority = 300, - .cra_module = THIS_MODULE, - .cra_init = zip_alloc_comp_ctx_lzs, - .cra_exit = zip_free_comp_ctx, - .cra_u = { .compress = { - .coa_compress = zip_comp_compress, - .coa_decompress = zip_comp_decompress - } } -}; - static struct scomp_alg zip_scomp_deflate = { .alloc_ctx = zip_alloc_scomp_ctx_deflate, .free_ctx = zip_free_scomp_ctx, @@ -431,22 +401,10 @@ static int zip_register_compression_device(void) { int ret; - ret = crypto_register_alg(&zip_comp_deflate); - if (ret < 0) { - zip_err("Deflate algorithm registration failed\n"); - return ret; - } - - ret = crypto_register_alg(&zip_comp_lzs); - if (ret < 0) { - zip_err("LZS algorithm registration failed\n"); - goto err_unregister_alg_deflate; - } - ret = crypto_register_scomp(&zip_scomp_deflate); if (ret < 0) { zip_err("Deflate scomp algorithm registration failed\n"); - goto err_unregister_alg_lzs; + return ret; } ret = crypto_register_scomp(&zip_scomp_lzs); @@ -459,18 +417,12 @@ static int zip_register_compression_device(void) err_unregister_scomp_deflate: crypto_unregister_scomp(&zip_scomp_deflate); -err_unregister_alg_lzs: - crypto_unregister_alg(&zip_comp_lzs); -err_unregister_alg_deflate: - crypto_unregister_alg(&zip_comp_deflate); return ret; } static void zip_unregister_compression_device(void) { - crypto_unregister_alg(&zip_comp_deflate); - crypto_unregister_alg(&zip_comp_lzs); crypto_unregister_scomp(&zip_scomp_deflate); crypto_unregister_scomp(&zip_scomp_lzs); } diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 248d98fd8c48..2ebc878da160 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -189,14 +189,17 @@ static bool sp_pci_is_master(struct sp_device *sp) pdev_new = to_pci_dev(dev_new); pdev_cur = to_pci_dev(dev_cur); - if (pdev_new->bus->number < pdev_cur->bus->number) - return true; + if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus)) + return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus); - if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn)) - return true; + if (pdev_new->bus->number != pdev_cur->bus->number) + return pdev_new->bus->number < pdev_cur->bus->number; - if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn)) - return true; + if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn)) + return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn); + + if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn)) + return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn); return false; } @@ -529,6 +532,7 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] }, { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 
0x1649), (kernel_ulong_t)&dev_vdata[6] }, + { PCI_VDEVICE(AMD, 0x1134), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, /* Last entry must be zero */ diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 2a2910261210..61b5e1c5d019 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -39,6 +39,8 @@ struct hpre_ctx; #define HPRE_DFX_SEC_TO_US 1000000 #define HPRE_DFX_US_TO_NS 1000 +#define HPRE_ENABLE_HPCORE_SHIFT 7 + /* due to nist p521 */ #define HPRE_ECC_MAX_KSZ 66 @@ -131,6 +133,8 @@ struct hpre_ctx { }; /* for ecc algorithms */ unsigned int curve_id; + /* for high performance core */ + u8 enable_hpcore; }; struct hpre_asym_request { @@ -1619,6 +1623,8 @@ static int hpre_ecdh_compute_value(struct kpp_request *req) } msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL); + msg->resv1 = ctx->enable_hpcore << HPRE_ENABLE_HPCORE_SHIFT; + ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; @@ -1653,6 +1659,7 @@ static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P256; + ctx->enable_hpcore = 1; kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 4b9970230822..703920b49c7c 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -37,7 +37,6 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; - bool fallback; }; /* SEC request of Crypto */ diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 66bc07da9eb6..8ea5305bc320 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -57,7 +57,6 @@ #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E -#define SEC_SQE_LEN_RATE_MASK 0x3 #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 @@ -80,16 +79,16 @@ #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ SEC_PBUF_LEFT_SZ(depth)) -#define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 #define SEC_ICV_ERR 0x2 -#define MIN_MAC_LEN 4 #define MAC_LEN_MASK 0x1U #define MAX_INPUT_DATA_LEN 0xFFFE00 #define BITS_MASK 0xFF +#define WORD_MASK 0x3 #define BYTE_BITS 0x8 +#define BYTES_TO_WORDS(bcount) ((bcount) >> 2) #define SEC_XTS_NAME_SZ 0x3 #define IV_CM_CAL_NUM 2 #define IV_CL_MASK 0x7 @@ -691,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) c_ctx->fallback = false; - /* Currently, only XTS mode need fallback tfm when using 192bit key */ - if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) - return 0; - c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { - pr_err("failed to alloc xts mode fallback tfm!\n"); + pr_err("failed to alloc fallback tfm for %s!\n", alg); return PTR_ERR(c_ctx->fbtfm); } @@ -858,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (c_ctx->fallback && c_ctx->fbtfm) { + if (c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set 
fallback skcipher key!\n"); @@ -1090,11 +1085,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_shash *hash_tfm = ctx->hash_tfm; int blocksize, digestsize, ret; - if (!keys->authkeylen) { - pr_err("hisi_sec2: aead auth key error!\n"); - return -EINVAL; - } - blocksize = crypto_shash_blocksize(hash_tfm); digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { @@ -1106,7 +1096,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, } ctx->a_key_len = digestsize; } else { - memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + if (keys->authkeylen) + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; } @@ -1160,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, } ret = crypto_authenc_extractkeys(&keys, key, keylen); - if (ret) + if (ret) { + dev_err(dev, "sec extract aead keys err!\n"); goto bad_key; + } ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { @@ -1175,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; } - if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) { - ret = -EINVAL; - dev_err(dev, "AUTH key length error!\n"); - goto bad_key; - } - ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); if (ret) { dev_err(dev, "set sec fallback key err!\n"); @@ -1583,11 +1570,10 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); - sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize)); sec_sqe->type2.mac_key_alg |= - cpu_to_le32((u32)((ctx->a_key_len) / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); @@ -1639,12 +1625,10 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); sqe3->auth_mac_key |= - cpu_to_le32((u32)(authsize / - SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); + cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3); sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->a_key_len / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3); sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); @@ -2003,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) return sec_aead_ctx_init(tfm, "sha512"); } -static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, - struct sec_req *sreq) +static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) { u32 cryptlen = sreq->c_req.sk_req->cryptlen; struct device *dev = ctx->dev; @@ -2026,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, } break; case SEC_CMODE_CTR: - if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { - dev_err(dev, "skcipher HW version error!\n"); - ret = -EINVAL; - } break; default: ret = -EINVAL; @@ -2038,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, return ret; } -static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_skcipher_param_check(struct sec_ctx *ctx, + struct sec_req *sreq, bool *need_fallback) { struct skcipher_request *sk_req = sreq->c_req.sk_req; struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; - if (unlikely(!sk_req->src || !sk_req->dst || - 
sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { + if (unlikely(!sk_req->src || !sk_req->dst)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } + + if (sk_req->cryptlen > MAX_INPUT_DATA_LEN) + *need_fallback = true; + sreq->c_req.c_len = sk_req->cryptlen; if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) @@ -2106,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); struct sec_req *req = skcipher_request_ctx(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); + bool need_fallback = false; int ret; if (!sk_req->cryptlen) { @@ -2119,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) req->c_req.encrypt = encrypt; req->ctx = ctx; - ret = sec_skcipher_param_check(ctx, req); + ret = sec_skcipher_param_check(ctx, req, &need_fallback); if (unlikely(ret)) return -EINVAL; - if (unlikely(ctx->c_ctx.fallback)) + if (unlikely(ctx->c_ctx.fallback || need_fallback)) return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); return ctx->req_op->process(ctx, req); @@ -2231,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t sz = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; - struct device *dev = ctx->dev; int ret; - /* Hardware does not handle cases where authsize is less than 4 bytes */ - if (unlikely(sz < MIN_MAC_LEN)) { - sreq->aead_req.fallback = true; + if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len)) return -EINVAL; - } if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(dev, "aead input spec error!\n"); + req->assoclen > SEC_MAX_AAD_LEN)) return -EINVAL; - } if (c_mode == SEC_CMODE_CCM) { - if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { - dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); + if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) return -EINVAL; - } - ret = aead_iv_demension_check(req); - if (ret) { - dev_err(dev, "aead input iv param error!\n"); - return ret; - } - } - if (sreq->c_req.encrypt) - sreq->c_req.c_len = req->cryptlen; - else - sreq->c_req.c_len = req->cryptlen - sz; - if (c_mode == SEC_CMODE_CBC) { - if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { - dev_err(dev, "aead crypto length error!\n"); + ret = aead_iv_demension_check(req); + if (unlikely(ret)) + return -EINVAL; + } else if (c_mode == SEC_CMODE_CBC) { + if (unlikely(sz & WORD_MASK)) + return -EINVAL; + if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK)) return -EINVAL; - } } return 0; } -static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) +static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback) { struct aead_request *req = sreq->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; @@ -2285,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; } - if (ctx->sec->qm.ver == QM_HW_V2) { - if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && - req->cryptlen <= authsize))) { - sreq->aead_req.fallback = true; - return -EINVAL; - } + if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC && + sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { + dev_err(dev, "aead cbc mode input data length error!\n"); + return -EINVAL; } /* 
Support AES or SM4 */ @@ -2299,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) return -EINVAL; } - if (unlikely(sec_aead_spec_check(ctx, sreq))) + if (unlikely(sec_aead_spec_check(ctx, sreq))) { + *need_fallback = true; return -EINVAL; + } if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ) @@ -2344,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); struct sec_req *req = aead_request_ctx(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); + size_t sz = crypto_aead_authsize(tfm); + bool need_fallback = false; int ret; req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; - req->aead_req.fallback = false; + req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz); - ret = sec_aead_param_check(ctx, req); + ret = sec_aead_param_check(ctx, req, &need_fallback); if (unlikely(ret)) { - if (req->aead_req.fallback) + if (need_fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } diff --git a/drivers/crypto/inside-secure/Makefile b/drivers/crypto/inside-secure/Makefile index 13f64f96c626..30d13fd5d58e 100644 --- a/drivers/crypto/inside-secure/Makefile +++ b/drivers/crypto/inside-secure/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o +obj-y += eip93/ diff --git a/drivers/crypto/inside-secure/eip93/Kconfig b/drivers/crypto/inside-secure/eip93/Kconfig new file mode 100644 index 000000000000..8353d3d7ec9b --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +config CRYPTO_DEV_EIP93 + tristate "Support for EIP93 crypto HW accelerators" + depends on SOC_MT7621 || ARCH_AIROHA || COMPILE_TEST + select CRYPTO_LIB_AES + select CRYPTO_LIB_DES + select CRYPTO_SKCIPHER + select CRYPTO_AEAD + select CRYPTO_AUTHENC + select CRYPTO_MD5 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + The EIP93 provides various crypto HW accelerators. Select this if + you want to use the EIP93 module for any of the supported crypto algorithms. + + If the IP supports it, this provides offload for AES in ECB, CBC and + CTR modes. It also provides DES and 3DES in ECB and CBC modes. + + It also provides AEAD authenc(hmac(x), cipher(y)) for the supported algorithms. 
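Editor's note, not part of the patch: the Kconfig help above describes transforms the driver registers with the generic kernel crypto API, so consumers never call the driver directly. A minimal illustrative sketch follows; the helper name is hypothetical, "cbc(aes)" resolves to "cbc(aes-eip93)" only when this driver is the highest-priority implementation, buf is assumed to be a DMA-able kernel buffer, and len a multiple of AES_BLOCK_SIZE.

    #include <crypto/aes.h>
    #include <crypto/skcipher.h>
    #include <linux/scatterlist.h>

    /* Hypothetical helper: one-shot in-place AES-128-CBC encryption. */
    static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
                                       const u8 *key, u8 *iv)
    {
            struct crypto_skcipher *tfm;
            struct skcipher_request *req;
            struct scatterlist sg;
            DECLARE_CRYPTO_WAIT(wait);
            int ret;

            tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
            if (ret)
                    goto out_free_tfm;

            req = skcipher_request_alloc(tfm, GFP_KERNEL);
            if (!req) {
                    ret = -ENOMEM;
                    goto out_free_tfm;
            }

            sg_init_one(&sg, buf, len);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                          crypto_req_done, &wait);
            skcipher_request_set_crypt(req, &sg, &sg, len, iv);

            /* The EIP93 completes asynchronously; wait for the result. */
            ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

            skcipher_request_free(req);
    out_free_tfm:
            crypto_free_skcipher(tfm);
            return ret;
    }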
diff --git a/drivers/crypto/inside-secure/eip93/Makefile b/drivers/crypto/inside-secure/eip93/Makefile new file mode 100644 index 000000000000..a3d3d3677cdc --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/Makefile @@ -0,0 +1,5 @@ +obj-$(CONFIG_CRYPTO_DEV_EIP93) += crypto-hw-eip93.o + +crypto-hw-eip93-y += eip93-main.o eip93-common.o +crypto-hw-eip93-y += eip93-cipher.o eip93-aead.o +crypto-hw-eip93-y += eip93-hash.o diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.c b/drivers/crypto/inside-secure/eip93/eip93-aead.c new file mode 100644 index 000000000000..18dd8a9a5165 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-aead.c @@ -0,0 +1,711 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <crypto/aead.h> +#include <crypto/aes.h> +#include <crypto/authenc.h> +#include <crypto/ctr.h> +#include <crypto/hmac.h> +#include <crypto/internal/aead.h> +#include <crypto/md5.h> +#include <crypto/null.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> + +#include <crypto/internal/des.h> + +#include <linux/crypto.h> +#include <linux/dma-mapping.h> + +#include "eip93-aead.h" +#include "eip93-cipher.h" +#include "eip93-common.h" +#include "eip93-regs.h" + +void eip93_aead_handle_result(struct crypto_async_request *async, int err) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct aead_request *req = aead_request_cast(async); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + eip93_unmap_dma(eip93, rctx, req->src, req->dst); + eip93_handle_result(eip93, rctx, req->iv); + + aead_request_complete(req, err); +} + +static int eip93_aead_send_req(struct crypto_async_request *async) +{ + struct aead_request *req = aead_request_cast(async); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + int err; + + err = check_valid_request(rctx); + if (err) { + aead_request_complete(req, err); + return err; + } + + return eip93_send_req(async, req->iv, rctx); +} + +/* Crypto aead API functions */ +static int eip93_aead_cra_init(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.aead.base); + + crypto_aead_set_reqsize(__crypto_aead_cast(tfm), + sizeof(struct eip93_cipher_reqctx)); + + ctx->eip93 = tmpl->eip93; + ctx->flags = tmpl->flags; + ctx->type = tmpl->type; + ctx->set_assoc = true; + + ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL); + if (!ctx->sa_record) + return -ENOMEM; + + return 0; +} + +static void eip93_aead_cra_exit(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + kfree(ctx->sa_record); +} + +static int eip93_aead_setkey(struct crypto_aead *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_authenc_keys keys; + struct crypto_aes_ctx aes; + struct sa_record *sa_record = ctx->sa_record; + u32 nonce = 0; + int ret; + + if (crypto_authenc_extractkeys(&keys, key, len)) + return -EINVAL; + + if (IS_RFC3686(ctx->flags)) { + if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; + memcpy(&nonce, keys.enckey + keys.enckeylen, + 
CTR_RFC3686_NONCE_SIZE); + } + + switch ((ctx->flags & EIP93_ALG_MASK)) { + case EIP93_ALG_DES: + ret = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + case EIP93_ALG_3DES: + if (keys.enckeylen != DES3_EDE_KEY_SIZE) + return -EINVAL; + + ret = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + case EIP93_ALG_AES: + ret = aes_expandkey(&aes, keys.enckey, keys.enckeylen); + if (ret) + return ret; + + break; + } + + ctx->blksize = crypto_aead_blocksize(ctfm); + /* Encryption key */ + eip93_set_sa_record(sa_record, keys.enckeylen, ctx->flags); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, + EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + ctx->authsize / sizeof(u32)); + + memcpy(sa_record->sa_key, keys.enckey, keys.enckeylen); + ctx->sa_nonce = nonce; + sa_record->sa_nonce = nonce; + + /* authentication key */ + ret = eip93_hmac_setkey(ctx->flags, keys.authkey, keys.authkeylen, + ctx->authsize, sa_record->sa_i_digest, + sa_record->sa_o_digest, false); + + ctx->set_assoc = true; + + return ret; +} + +static int eip93_aead_setauthsize(struct crypto_aead *ctfm, + unsigned int authsize) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + ctx->authsize = authsize; + ctx->sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + ctx->sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + ctx->authsize / sizeof(u32)); + + return 0; +} + +static void eip93_aead_setassoc(struct eip93_crypto_ctx *ctx, + struct aead_request *req) +{ + struct sa_record *sa_record = ctx->sa_record; + + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HASH_CRYPT_OFFSET; + sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_HASH_CRYPT_OFFSET, + req->assoclen / sizeof(u32)); + + ctx->assoclen = req->assoclen; +} + +static int eip93_aead_crypt(struct aead_request *req) +{ + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + struct crypto_async_request *async = &req->base; + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + int ret; + + ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); + if (ret) + return ret; + + rctx->textsize = req->cryptlen; + rctx->blksize = ctx->blksize; + rctx->assoclen = req->assoclen; + rctx->authsize = ctx->authsize; + rctx->sg_src = req->src; + rctx->sg_dst = req->dst; + rctx->ivsize = crypto_aead_ivsize(aead); + rctx->desc_flags = EIP93_DESC_AEAD; + rctx->sa_record_base = ctx->sa_record_base; + + if (IS_DECRYPT(rctx->flags)) + rctx->textsize -= rctx->authsize; + + return eip93_aead_send_req(async); +} + +static int eip93_aead_encrypt(struct aead_request *req) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + rctx->flags = ctx->flags; + rctx->flags |= EIP93_ENCRYPT; + if (ctx->set_assoc) { + eip93_aead_setassoc(ctx, req); + ctx->set_assoc = false; + } + + if (req->assoclen != ctx->assoclen) { + dev_err(ctx->eip93->dev, "Request AAD length error\n"); + return -EINVAL; + } + + return eip93_aead_crypt(req); +} + +static int eip93_aead_decrypt(struct aead_request *req) +{ + struct 
eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = aead_request_ctx(req); + + ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN; + ctx->sa_record->sa_cmd1_word &= ~(EIP93_SA_CMD_COPY_PAD | + EIP93_SA_CMD_COPY_DIGEST); + + rctx->flags = ctx->flags; + rctx->flags |= EIP93_DECRYPT; + if (ctx->set_assoc) { + eip93_aead_setassoc(ctx, req); + ctx->set_assoc = false; + } + + if (req->assoclen != ctx->assoclen) { + dev_err(ctx->eip93->dev, "Request AAD length error\n"); + return -EINVAL; + } + + return eip93_aead_crypt(req); +} + +/* Available authenc algorithms in this module */ +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(aes))", + .cra_driver_name = + "authenc(hmac(md5-eip93), cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = 
eip93_aead_decrypt, + .ivsize = AES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(aes))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(md5-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | + EIP93_MODE_CTR | EIP93_MODE_RFC3686 | 
EIP93_ALG_AES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = CTR_RFC3686_IV_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),rfc3686(ctr(aes-eip93)))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des))", + .cra_driver_name = + "authenc(hmac(md5-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_DES, + 
.alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(des-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = MD5_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(md5),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(md5-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA1_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha1-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA224_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha224-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_AEAD, + .flags = EIP93_HASH_HMAC | 
EIP93_HASH_SHA256 | EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.aead = { + .setkey = eip93_aead_setkey, + .encrypt = eip93_aead_encrypt, + .decrypt = eip93_aead_decrypt, + .ivsize = DES3_EDE_BLOCK_SIZE, + .setauthsize = eip93_aead_setauthsize, + .maxauthsize = SHA256_DIGEST_SIZE, + .base = { + .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", + .cra_driver_name = + "authenc(hmac(sha256-eip93),cbc(des3_ede-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0x0, + .cra_init = eip93_aead_cra_init, + .cra_exit = eip93_aead_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-aead.h b/drivers/crypto/inside-secure/eip93/eip93-aead.h new file mode 100644 index 000000000000..e2fa8fd39c50 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-aead.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_AEAD_H_ +#define _EIP93_AEAD_H_ + +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ctr_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_rfc3686_aes; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_cbc_des3_ede; +extern struct eip93_alg_template eip93_alg_authenc_hmac_md5_ecb_null; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha1_ecb_null; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha224_ecb_null; +extern struct eip93_alg_template eip93_alg_authenc_hmac_sha256_ecb_null; + +void eip93_aead_handle_result(struct crypto_async_request *async, int err); + +#endif /* _EIP93_AEAD_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-aes.h b/drivers/crypto/inside-secure/eip93/eip93-aes.h new file mode 100644 index 000000000000..1d83d39cab2a --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-aes.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef 
_EIP93_AES_H_ +#define _EIP93_AES_H_ + +extern struct eip93_alg_template eip93_alg_ecb_aes; +extern struct eip93_alg_template eip93_alg_cbc_aes; +extern struct eip93_alg_template eip93_alg_ctr_aes; +extern struct eip93_alg_template eip93_alg_rfc3686_aes; + +#endif /* _EIP93_AES_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.c b/drivers/crypto/inside-secure/eip93/eip93-cipher.c new file mode 100644 index 000000000000..1f2d6846610f --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.c @@ -0,0 +1,413 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/internal/des.h> +#include <linux/dma-mapping.h> + +#include "eip93-aes.h" +#include "eip93-cipher.h" +#include "eip93-common.h" +#include "eip93-des.h" +#include "eip93-regs.h" + +void eip93_skcipher_handle_result(struct crypto_async_request *async, int err) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct skcipher_request *req = skcipher_request_cast(async); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + + eip93_unmap_dma(eip93, rctx, req->src, req->dst); + eip93_handle_result(eip93, rctx, req->iv); + + skcipher_request_complete(req, err); +} + +static int eip93_skcipher_send_req(struct crypto_async_request *async) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + int err; + + err = check_valid_request(rctx); + + if (err) { + skcipher_request_complete(req, err); + return err; + } + + return eip93_send_req(async, req->iv, rctx); +} + +/* Crypto skcipher API functions */ +static int eip93_skcipher_cra_init(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct eip93_cipher_reqctx)); + + memset(ctx, 0, sizeof(*ctx)); + + ctx->eip93 = tmpl->eip93; + ctx->type = tmpl->type; + + ctx->sa_record = kzalloc(sizeof(*ctx->sa_record), GFP_KERNEL); + if (!ctx->sa_record) + return -ENOMEM; + + return 0; +} + +static void eip93_skcipher_cra_exit(struct crypto_tfm *tfm) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + + dma_unmap_single(ctx->eip93->dev, ctx->sa_record_base, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + kfree(ctx->sa_record); +} + +static int eip93_skcipher_setkey(struct crypto_skcipher *ctfm, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm); + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, + alg.skcipher.base); + struct sa_record *sa_record = ctx->sa_record; + unsigned int keylen = len; + u32 flags = tmpl->flags; + u32 nonce = 0; + int ret; + + if (!key || !keylen) + return -EINVAL; + + if (IS_RFC3686(flags)) { + if (len < CTR_RFC3686_NONCE_SIZE) + return -EINVAL; + + keylen = len - CTR_RFC3686_NONCE_SIZE; + memcpy(&nonce, key + keylen, CTR_RFC3686_NONCE_SIZE); + } + + if (flags & EIP93_ALG_DES) { + ctx->blksize = DES_BLOCK_SIZE; + ret = verify_skcipher_des_key(ctfm, key); + if (ret) + return ret; + } + if (flags & EIP93_ALG_3DES) { + ctx->blksize = DES3_EDE_BLOCK_SIZE; + ret = 
verify_skcipher_des3_key(ctfm, key); + if (ret) + return ret; + } + + if (flags & EIP93_ALG_AES) { + struct crypto_aes_ctx aes; + + ctx->blksize = AES_BLOCK_SIZE; + ret = aes_expandkey(&aes, key, keylen); + if (ret) + return ret; + } + + eip93_set_sa_record(sa_record, keylen, flags); + + memcpy(sa_record->sa_key, key, keylen); + ctx->sa_nonce = nonce; + sa_record->sa_nonce = nonce; + + return 0; +} + +static int eip93_skcipher_crypt(struct skcipher_request *req) +{ + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct crypto_async_request *async = &req->base; + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + int ret; + + if (!req->cryptlen) + return 0; + + /* + * ECB and CBC algorithms require message lengths to be + * multiples of block size. + */ + if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags)) + if (!IS_ALIGNED(req->cryptlen, + crypto_skcipher_blocksize(skcipher))) + return -EINVAL; + + ctx->sa_record_base = dma_map_single(ctx->eip93->dev, ctx->sa_record, + sizeof(*ctx->sa_record), DMA_TO_DEVICE); + ret = dma_mapping_error(ctx->eip93->dev, ctx->sa_record_base); + if (ret) + return ret; + + rctx->assoclen = 0; + rctx->textsize = req->cryptlen; + rctx->authsize = 0; + rctx->sg_src = req->src; + rctx->sg_dst = req->dst; + rctx->ivsize = crypto_skcipher_ivsize(skcipher); + rctx->blksize = ctx->blksize; + rctx->desc_flags = EIP93_DESC_SKCIPHER; + rctx->sa_record_base = ctx->sa_record_base; + + return eip93_skcipher_send_req(async); +} + +static int eip93_skcipher_encrypt(struct skcipher_request *req) +{ + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + rctx->flags = tmpl->flags; + rctx->flags |= EIP93_ENCRYPT; + + return eip93_skcipher_crypt(req); +} + +static int eip93_skcipher_decrypt(struct skcipher_request *req) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct eip93_cipher_reqctx *rctx = skcipher_request_ctx(req); + struct eip93_alg_template *tmpl = container_of(req->base.tfm->__crt_alg, + struct eip93_alg_template, alg.skcipher.base); + + ctx->sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIRECTION_IN; + + rctx->flags = tmpl->flags; + rctx->flags |= EIP93_DECRYPT; + + return eip93_skcipher_crypt(req); +} + +/* Available algorithms in this module */ +struct eip93_alg_template eip93_alg_ecb_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(aes)", + .cra_driver_name = "ecb(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = 
AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ctr_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CTR | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .base = { + .cra_name = "ctr(aes)", + .cra_driver_name = "ctr(aes-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_rfc3686_aes = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CTR | EIP93_MODE_RFC3686 | EIP93_ALG_AES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, + .ivsize = CTR_RFC3686_IV_SIZE, + .base = { + .cra_name = "rfc3686(ctr(aes))", + .cra_driver_name = "rfc3686(ctr(aes-eip93))", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_NEED_FALLBACK | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = 1, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0xf, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ecb_des = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(des)", + .cra_driver_name = "ecb(des-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_des = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .ivsize = DES_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des)", + .cra_driver_name = "cbc(des-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = 
eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_ecb_des3_ede = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_ECB | EIP93_ALG_3DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = 0, + .base = { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "ecb(des3_ede-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; + +struct eip93_alg_template eip93_alg_cbc_des3_ede = { + .type = EIP93_ALG_TYPE_SKCIPHER, + .flags = EIP93_MODE_CBC | EIP93_ALG_3DES, + .alg.skcipher = { + .setkey = eip93_skcipher_setkey, + .encrypt = eip93_skcipher_encrypt, + .decrypt = eip93_skcipher_decrypt, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .ivsize = DES3_EDE_BLOCK_SIZE, + .base = { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "cbc(des3_ede-eip93)", + .cra_priority = EIP93_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_crypto_ctx), + .cra_alignmask = 0, + .cra_init = eip93_skcipher_cra_init, + .cra_exit = eip93_skcipher_cra_exit, + .cra_module = THIS_MODULE, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-cipher.h b/drivers/crypto/inside-secure/eip93/eip93-cipher.h new file mode 100644 index 000000000000..6e2545ebd879 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-cipher.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_CIPHER_H_ +#define _EIP93_CIPHER_H_ + +#include "eip93-main.h" + +struct eip93_crypto_ctx { + struct eip93_device *eip93; + u32 flags; + struct sa_record *sa_record; + u32 sa_nonce; + int blksize; + dma_addr_t sa_record_base; + /* AEAD specific */ + unsigned int authsize; + unsigned int assoclen; + bool set_assoc; + enum eip93_alg_type type; +}; + +struct eip93_cipher_reqctx { + u16 desc_flags; + u16 flags; + unsigned int blksize; + unsigned int ivsize; + unsigned int textsize; + unsigned int assoclen; + unsigned int authsize; + dma_addr_t sa_record_base; + struct sa_state *sa_state; + dma_addr_t sa_state_base; + struct eip93_descriptor *cdesc; + struct scatterlist *sg_src; + struct scatterlist *sg_dst; + int src_nents; + int dst_nents; + struct sa_state *sa_state_ctr; + dma_addr_t sa_state_ctr_base; +}; + +int check_valid_request(struct eip93_cipher_reqctx *rctx); + +void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + struct scatterlist *reqsrc, struct scatterlist *reqdst); + +void eip93_skcipher_handle_result(struct crypto_async_request *async, int err); + +int eip93_send_req(struct crypto_async_request *async, + const u8 *reqiv, struct eip93_cipher_reqctx *rctx); + +void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + u8 *reqiv); + +#endif /* _EIP93_CIPHER_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.c 
b/drivers/crypto/inside-secure/eip93/eip93-common.c new file mode 100644 index 000000000000..66153aa2493f --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-common.c @@ -0,0 +1,822 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <crypto/aes.h> +#include <crypto/ctr.h> +#include <crypto/hmac.h> +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> + +#include "eip93-cipher.h" +#include "eip93-hash.h" +#include "eip93-common.h" +#include "eip93-main.h" +#include "eip93-regs.h" + +int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err) +{ + u32 ext_err; + + if (!err) + return 0; + + switch (err & ~EIP93_PE_CTRL_PE_EXT_ERR_CODE) { + case EIP93_PE_CTRL_PE_AUTH_ERR: + case EIP93_PE_CTRL_PE_PAD_ERR: + return -EBADMSG; + /* let software handle anti-replay errors */ + case EIP93_PE_CTRL_PE_SEQNUM_ERR: + return 0; + case EIP93_PE_CTRL_PE_EXT_ERR: + break; + default: + dev_err(eip93->dev, "Unhandled error 0x%08x\n", err); + return -EINVAL; + } + + /* Parse additional ext errors */ + ext_err = FIELD_GET(EIP93_PE_CTRL_PE_EXT_ERR_CODE, err); + switch (ext_err) { + case EIP93_PE_CTRL_PE_EXT_ERR_BUS: + case EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING: + return -EIO; + case EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER: + return -EACCES; + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP: + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO: + case EIP93_PE_CTRL_PE_EXT_ERR_SPI: + return -EINVAL; + case EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH: + case EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH: + case EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR: + return -EBADMSG; + default: + dev_err(eip93->dev, "Unhandled ext error 0x%08x\n", ext_err); + return -EINVAL; + } +} + +static void *eip93_ring_next_wptr(struct eip93_device *eip93, + struct eip93_desc_ring *ring) +{ + void *ptr = ring->write; + + if ((ring->write == ring->read - ring->offset) || + (ring->read == ring->base && ring->write == ring->base_end)) + return ERR_PTR(-ENOMEM); + + if (ring->write == ring->base_end) + ring->write = ring->base; + else + ring->write += ring->offset; + + return ptr; +} + +static void *eip93_ring_next_rptr(struct eip93_device *eip93, + struct eip93_desc_ring *ring) +{ + void *ptr = ring->read; + + if (ring->write == ring->read) + return ERR_PTR(-ENOENT); + + if (ring->read == ring->base_end) + ring->read = ring->base; + else + ring->read += ring->offset; + + return ptr; +} + +int eip93_put_descriptor(struct eip93_device *eip93, + struct eip93_descriptor *desc) +{ + struct eip93_descriptor *cdesc; + struct eip93_descriptor *rdesc; + + rdesc = eip93_ring_next_wptr(eip93, &eip93->ring->rdr); + if (IS_ERR(rdesc)) + return -ENOENT; + + cdesc = eip93_ring_next_wptr(eip93, &eip93->ring->cdr); + if (IS_ERR(cdesc)) + return -ENOENT; + + memset(rdesc, 0, sizeof(struct eip93_descriptor)); + + memcpy(cdesc, desc, sizeof(struct eip93_descriptor)); + + return 0; +} + +void *eip93_get_descriptor(struct eip93_device *eip93) +{ + struct eip93_descriptor *cdesc; + void *ptr; + + cdesc = eip93_ring_next_rptr(eip93, &eip93->ring->cdr); + if (IS_ERR(cdesc)) + return ERR_PTR(-ENOENT); + + memset(cdesc, 0, sizeof(struct eip93_descriptor)); + + ptr = eip93_ring_next_rptr(eip93, &eip93->ring->rdr); + if (IS_ERR(ptr)) + return ERR_PTR(-ENOENT); + + return ptr; +} + +static void eip93_free_sg_copy(const int len, 
struct scatterlist **sg) +{ + if (!*sg || !len) + return; + + free_pages((unsigned long)sg_virt(*sg), get_order(len)); + kfree(*sg); + *sg = NULL; +} + +static int eip93_make_sg_copy(struct scatterlist *src, struct scatterlist **dst, + const u32 len, const bool copy) +{ + void *pages; + + *dst = kmalloc(sizeof(**dst), GFP_KERNEL); + if (!*dst) + return -ENOMEM; + + pages = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, + get_order(len)); + if (!pages) { + kfree(*dst); + *dst = NULL; + return -ENOMEM; + } + + sg_init_table(*dst, 1); + sg_set_buf(*dst, pages, len); + + /* copy only as requested */ + if (copy) + sg_copy_to_buffer(src, sg_nents(src), pages, len); + + return 0; +} + +static bool eip93_is_sg_aligned(struct scatterlist *sg, u32 len, + const int blksize) +{ + int nents; + + for (nents = 0; sg; sg = sg_next(sg), ++nents) { + if (!IS_ALIGNED(sg->offset, 4)) + return false; + + if (len <= sg->length) { + if (!IS_ALIGNED(len, blksize)) + return false; + + return true; + } + + if (!IS_ALIGNED(sg->length, blksize)) + return false; + + len -= sg->length; + } + return false; +} + +int check_valid_request(struct eip93_cipher_reqctx *rctx) +{ + struct scatterlist *src = rctx->sg_src; + struct scatterlist *dst = rctx->sg_dst; + u32 textsize = rctx->textsize; + u32 authsize = rctx->authsize; + u32 blksize = rctx->blksize; + u32 totlen_src = rctx->assoclen + rctx->textsize; + u32 totlen_dst = rctx->assoclen + rctx->textsize; + u32 copy_len; + bool src_align, dst_align; + int src_nents, dst_nents; + int err = -EINVAL; + + if (!IS_CTR(rctx->flags)) { + if (!IS_ALIGNED(textsize, blksize)) + return err; + } + + if (authsize) { + if (IS_ENCRYPT(rctx->flags)) + totlen_dst += authsize; + else + totlen_src += authsize; + } + + src_nents = sg_nents_for_len(src, totlen_src); + if (src_nents < 0) + return src_nents; + + dst_nents = sg_nents_for_len(dst, totlen_dst); + if (dst_nents < 0) + return dst_nents; + + if (src == dst) { + src_nents = max(src_nents, dst_nents); + dst_nents = src_nents; + if (unlikely((totlen_src || totlen_dst) && !src_nents)) + return err; + + } else { + if (unlikely(totlen_src && !src_nents)) + return err; + + if (unlikely(totlen_dst && !dst_nents)) + return err; + } + + if (authsize) { + if (dst_nents == 1 && src_nents == 1) { + src_align = eip93_is_sg_aligned(src, totlen_src, blksize); + if (src == dst) + dst_align = src_align; + else + dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize); + } else { + src_align = false; + dst_align = false; + } + } else { + src_align = eip93_is_sg_aligned(src, totlen_src, blksize); + if (src == dst) + dst_align = src_align; + else + dst_align = eip93_is_sg_aligned(dst, totlen_dst, blksize); + } + + copy_len = max(totlen_src, totlen_dst); + if (!src_align) { + err = eip93_make_sg_copy(src, &rctx->sg_src, copy_len, true); + if (err) + return err; + } + + if (!dst_align) { + err = eip93_make_sg_copy(dst, &rctx->sg_dst, copy_len, false); + if (err) + return err; + } + + src_nents = sg_nents_for_len(rctx->sg_src, totlen_src); + if (src_nents < 0) + return src_nents; + + dst_nents = sg_nents_for_len(rctx->sg_dst, totlen_dst); + if (dst_nents < 0) + return dst_nents; + + rctx->src_nents = src_nents; + rctx->dst_nents = dst_nents; + + return 0; +} + +/* + * Set sa_record function: + * Even where a sa_record field is set to 0, keep the explicit " = 0" for readability. 
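+ * Worked example (editor's illustration, values assumed): for flags
+ * selecting AES-CBC with HMAC-SHA256 and keylen = 32, the switches below
+ * set CIPHER_AES and HASH_SHA256 in sa_cmd0_word, CHIPER_MODE_CBC and
+ * HMAC in sa_cmd1_word, and AES_KEY_LENGTH = 32 >> 3 = 4.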
+ */ +void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen, + const u32 flags) +{ + /* Reset cmd word */ + sa_record->sa_cmd0_word = 0; + sa_record->sa_cmd1_word = 0; + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_IV_FROM_STATE; + if (!IS_ECB(flags)) + sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_IV; + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_OP_BASIC; + + switch ((flags & EIP93_ALG_MASK)) { + case EIP93_ALG_AES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_AES; + sa_record->sa_cmd1_word |= FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, + keylen >> 3); + break; + case EIP93_ALG_3DES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_3DES; + break; + case EIP93_ALG_DES: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_DES; + break; + default: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_CIPHER_NULL; + } + + switch ((flags & EIP93_HASH_MASK)) { + case EIP93_HASH_SHA256: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA256; + break; + case EIP93_HASH_SHA224: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA224; + break; + case EIP93_HASH_SHA1: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_SHA1; + break; + case EIP93_HASH_MD5: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_MD5; + break; + default: + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_NULL; + } + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_PAD_ZERO; + + switch ((flags & EIP93_MODE_MASK)) { + case EIP93_MODE_CBC: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CBC; + break; + case EIP93_MODE_CTR: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_CTR; + break; + case EIP93_MODE_ECB: + sa_record->sa_cmd1_word |= EIP93_SA_CMD_CHIPER_MODE_ECB; + break; + } + + sa_record->sa_cmd0_word |= EIP93_SA_CMD_DIGEST_3WORD; + if (IS_HASH(flags)) { + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_PAD; + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_DIGEST; + } + + if (IS_HMAC(flags)) { + sa_record->sa_cmd1_word |= EIP93_SA_CMD_HMAC; + sa_record->sa_cmd1_word |= EIP93_SA_CMD_COPY_HEADER; + } + + sa_record->sa_spi = 0x0; + sa_record->sa_seqmum_mask[0] = 0xFFFFFFFF; + sa_record->sa_seqmum_mask[1] = 0x0; +} + +/* + * Poor man's scatter/gather function: + * Create a descriptor for every segment to avoid copying buffers. 
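+ * For example (editor's illustration): a three-segment source and a
+ * two-segment destination are walked in lockstep below and emit at most
+ * four descriptors, one per overlapping run, plus one more when the
+ * CTR split boundary lands inside a run.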
+ * For performance it is better to wait for the hardware to perform multiple DMA transfers. + */ +static int eip93_scatter_combine(struct eip93_device *eip93, + struct eip93_cipher_reqctx *rctx, + u32 datalen, u32 split, int offsetin) +{ + struct eip93_descriptor *cdesc = rctx->cdesc; + struct scatterlist *sgsrc = rctx->sg_src; + struct scatterlist *sgdst = rctx->sg_dst; + unsigned int remainin = sg_dma_len(sgsrc); + unsigned int remainout = sg_dma_len(sgdst); + dma_addr_t saddr = sg_dma_address(sgsrc); + dma_addr_t daddr = sg_dma_address(sgdst); + dma_addr_t state_addr; + u32 src_addr, dst_addr, len, n; + bool nextin = false; + bool nextout = false; + int offsetout = 0; + int err; + + if (IS_ECB(rctx->flags)) + rctx->sa_state_base = 0; + + if (split < datalen) { + state_addr = rctx->sa_state_ctr_base; + n = split; + } else { + state_addr = rctx->sa_state_base; + n = datalen; + } + + do { + if (nextin) { + sgsrc = sg_next(sgsrc); + remainin = sg_dma_len(sgsrc); + if (remainin == 0) + continue; + + saddr = sg_dma_address(sgsrc); + offsetin = 0; + nextin = false; + } + + if (nextout) { + sgdst = sg_next(sgdst); + remainout = sg_dma_len(sgdst); + if (remainout == 0) + continue; + + daddr = sg_dma_address(sgdst); + offsetout = 0; + nextout = false; + } + src_addr = saddr + offsetin; + dst_addr = daddr + offsetout; + + if (remainin == remainout) { + len = remainin; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + nextin = true; + nextout = true; + } + } else if (remainin < remainout) { + len = remainin; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + offsetout += len; + remainout -= len; + nextin = true; + } + } else { + len = remainout; + if (len > n) { + len = n; + remainin -= n; + remainout -= n; + offsetin += n; + offsetout += n; + } else { + offsetin += len; + remainin -= len; + nextout = true; + } + } + n -= len; + + cdesc->src_addr = src_addr; + cdesc->dst_addr = dst_addr; + cdesc->state_addr = state_addr; + cdesc->pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY, + EIP93_PE_LENGTH_HOST_READY); + cdesc->pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, len); + + if (n == 0) { + n = datalen - split; + split = datalen; + state_addr = rctx->sa_state_base; + } + + if (n == 0) + cdesc->user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, + EIP93_DESC_LAST); + + /* + * Loop with a delay - no need to roll back. + * This could be refined by slowing down at EIP93_RING_BUSY. + */ +again: + scoped_guard(spinlock_irqsave, &eip93->ring->write_lock) + err = eip93_put_descriptor(eip93, cdesc); + if (err) { + usleep_range(EIP93_RING_BUSY_DELAY, + EIP93_RING_BUSY_DELAY * 2); + goto again; + } + /* Writing new descriptor count starts DMA action */ + writel(1, eip93->base + EIP93_REG_PE_CD_COUNT); + } while (n); + + return -EINPROGRESS; +} + +int eip93_send_req(struct crypto_async_request *async, + const u8 *reqiv, struct eip93_cipher_reqctx *rctx) +{ + struct eip93_crypto_ctx *ctx = crypto_tfm_ctx(async->tfm); + struct eip93_device *eip93 = ctx->eip93; + struct scatterlist *src = rctx->sg_src; + struct scatterlist *dst = rctx->sg_dst; + struct sa_state *sa_state; + struct eip93_descriptor cdesc; + u32 flags = rctx->flags; + int offsetin = 0, err; + u32 datalen = rctx->assoclen + rctx->textsize; + u32 split = datalen; + u32 start, end, ctr, blocks; + u32 iv[AES_BLOCK_SIZE / sizeof(u32)]; + int crypto_async_idr; + + rctx->sa_state_ctr = NULL; + rctx->sa_state = NULL; + + if (IS_ECB(flags)) + goto skip_iv; + + 
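/*
+ * Editor's note on the CTR handling below (values illustrative): the
+ * 32-bit big-endian counter in iv[3] can wrap within one request. With
+ * ctr = 0xfffffffe and textsize = 64 (blocks = 4), end truncates below
+ * start, so split = AES_BLOCK_SIZE * -start = 32 bytes are processed
+ * with the original counter and the remainder with the manually
+ * wrapped IV.
+ */
+ 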
memcpy(iv, reqiv, rctx->ivsize); + + rctx->sa_state = kzalloc(sizeof(*rctx->sa_state), GFP_KERNEL); + if (!rctx->sa_state) + return -ENOMEM; + + sa_state = rctx->sa_state; + + memcpy(sa_state->state_iv, iv, rctx->ivsize); + if (IS_RFC3686(flags)) { + sa_state->state_iv[0] = ctx->sa_nonce; + sa_state->state_iv[1] = iv[0]; + sa_state->state_iv[2] = iv[1]; + sa_state->state_iv[3] = (u32 __force)cpu_to_be32(0x1); + } else if (!IS_HMAC(flags) && IS_CTR(flags)) { + /* Compute data length. */ + blocks = DIV_ROUND_UP(rctx->textsize, AES_BLOCK_SIZE); + ctr = be32_to_cpu((__be32 __force)iv[3]); + /* Check 32bit counter overflow. */ + start = ctr; + end = start + blocks - 1; + if (end < start) { + split = AES_BLOCK_SIZE * -start; + /* + * Increment the counter manually to cope with + * the hardware counter overflow. + */ + iv[3] = 0xffffffff; + crypto_inc((u8 *)iv, AES_BLOCK_SIZE); + + rctx->sa_state_ctr = kzalloc(sizeof(*rctx->sa_state_ctr), + GFP_KERNEL); + if (!rctx->sa_state_ctr) { + err = -ENOMEM; + goto free_sa_state; + } + + memcpy(rctx->sa_state_ctr->state_iv, reqiv, rctx->ivsize); + memcpy(sa_state->state_iv, iv, rctx->ivsize); + + rctx->sa_state_ctr_base = dma_map_single(eip93->dev, rctx->sa_state_ctr, + sizeof(*rctx->sa_state_ctr), + DMA_TO_DEVICE); + err = dma_mapping_error(eip93->dev, rctx->sa_state_ctr_base); + if (err) + goto free_sa_state_ctr; + } + } + + rctx->sa_state_base = dma_map_single(eip93->dev, rctx->sa_state, + sizeof(*rctx->sa_state), DMA_TO_DEVICE); + err = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (err) + goto free_sa_state_ctr_dma; + +skip_iv: + + cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, + EIP93_PE_CTRL_HOST_READY); + cdesc.sa_addr = rctx->sa_record_base; + cdesc.arc4_addr = 0; + + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) + crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0, + EIP93_RING_NUM - 1, GFP_ATOMIC); + + cdesc.user_id = FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) | + FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, rctx->desc_flags); + + rctx->cdesc = &cdesc; + + /* map DMA_BIDIRECTIONAL to invalidate cache on destination + * implies __dma_cache_wback_inv + */ + if (!dma_map_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL)) { + err = -ENOMEM; + goto free_sa_state_ctr_dma; + } + + if (src != dst && + !dma_map_sg(eip93->dev, src, rctx->src_nents, DMA_TO_DEVICE)) { + err = -ENOMEM; + goto free_sg_dma; + } + + return eip93_scatter_combine(eip93, rctx, datalen, split, offsetin); + +free_sg_dma: + dma_unmap_sg(eip93->dev, dst, rctx->dst_nents, DMA_BIDIRECTIONAL); +free_sa_state_ctr_dma: + if (rctx->sa_state_ctr) + dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base, + sizeof(*rctx->sa_state_ctr), + DMA_TO_DEVICE); +free_sa_state_ctr: + kfree(rctx->sa_state_ctr); + if (rctx->sa_state) + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*rctx->sa_state), + DMA_TO_DEVICE); +free_sa_state: + kfree(rctx->sa_state); + + return err; +} + +void eip93_unmap_dma(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + struct scatterlist *reqsrc, struct scatterlist *reqdst) +{ + u32 len = rctx->assoclen + rctx->textsize; + u32 authsize = rctx->authsize; + u32 flags = rctx->flags; + u32 *otag; + int i; + + if (rctx->sg_src == rctx->sg_dst) { + dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents, + DMA_BIDIRECTIONAL); + goto process_tag; + } + + dma_unmap_sg(eip93->dev, rctx->sg_src, rctx->src_nents, + DMA_TO_DEVICE); + + if (rctx->sg_src != reqsrc) + 
eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_src); + + dma_unmap_sg(eip93->dev, rctx->sg_dst, rctx->dst_nents, + DMA_BIDIRECTIONAL); + + /* SHA tags need conversion from net-to-host */ +process_tag: + if (IS_DECRYPT(flags)) + authsize = 0; + + if (authsize) { + if (!IS_HASH_MD5(flags)) { + otag = sg_virt(rctx->sg_dst) + len; + for (i = 0; i < (authsize / 4); i++) + otag[i] = be32_to_cpu((__be32 __force)otag[i]); + } + } + + if (rctx->sg_dst != reqdst) { + sg_copy_from_buffer(reqdst, sg_nents(reqdst), + sg_virt(rctx->sg_dst), len + authsize); + eip93_free_sg_copy(len + rctx->authsize, &rctx->sg_dst); + } +} + +void eip93_handle_result(struct eip93_device *eip93, struct eip93_cipher_reqctx *rctx, + u8 *reqiv) +{ + if (rctx->sa_state_ctr) + dma_unmap_single(eip93->dev, rctx->sa_state_ctr_base, + sizeof(*rctx->sa_state_ctr), + DMA_FROM_DEVICE); + + if (rctx->sa_state) + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*rctx->sa_state), + DMA_FROM_DEVICE); + + if (!IS_ECB(rctx->flags)) + memcpy(reqiv, rctx->sa_state->state_iv, rctx->ivsize); + + kfree(rctx->sa_state_ctr); + kfree(rctx->sa_state); +} + +int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen, + unsigned int hashlen, u8 *dest_ipad, u8 *dest_opad, + bool skip_ipad) +{ + u8 ipad[SHA256_BLOCK_SIZE], opad[SHA256_BLOCK_SIZE]; + struct crypto_ahash *ahash_tfm; + struct eip93_hash_reqctx *rctx; + struct ahash_request *req; + DECLARE_CRYPTO_WAIT(wait); + struct scatterlist sg[1]; + const char *alg_name; + int i, ret; + + switch (ctx_flags & EIP93_HASH_MASK) { + case EIP93_HASH_SHA256: + alg_name = "sha256-eip93"; + break; + case EIP93_HASH_SHA224: + alg_name = "sha224-eip93"; + break; + case EIP93_HASH_SHA1: + alg_name = "sha1-eip93"; + break; + case EIP93_HASH_MD5: + alg_name = "md5-eip93"; + break; + default: /* Impossible */ + return -EINVAL; + } + + ahash_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(ahash_tfm)) + return PTR_ERR(ahash_tfm); + + req = ahash_request_alloc(ahash_tfm, GFP_ATOMIC); + if (!req) { + ret = -ENOMEM; + goto err_ahash; + } + + rctx = ahash_request_ctx_dma(req); + crypto_init_wait(&wait); + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + + /* Hash the key if > SHA256_BLOCK_SIZE */ + if (keylen > SHA256_BLOCK_SIZE) { + sg_init_one(&sg[0], key, keylen); + + ahash_request_set_crypt(req, sg, ipad, keylen); + ret = crypto_wait_req(crypto_ahash_digest(req), &wait); + if (ret) + goto err_req; + + keylen = hashlen; + } else { + memcpy(ipad, key, keylen); + } + + /* Copy to opad */ + memset(ipad + keylen, 0, SHA256_BLOCK_SIZE - keylen); + memcpy(opad, ipad, SHA256_BLOCK_SIZE); + + /* Pad with HMAC constants */ + for (i = 0; i < SHA256_BLOCK_SIZE; i++) { + ipad[i] ^= HMAC_IPAD_VALUE; + opad[i] ^= HMAC_OPAD_VALUE; + } + + if (skip_ipad) { + memcpy(dest_ipad, ipad, SHA256_BLOCK_SIZE); + } else { + /* Hash ipad */ + sg_init_one(&sg[0], ipad, SHA256_BLOCK_SIZE); + ahash_request_set_crypt(req, sg, dest_ipad, SHA256_BLOCK_SIZE); + ret = crypto_ahash_init(req); + if (ret) + goto err_req; + + /* Disable HASH_FINALIZE for ipad hash */ + rctx->partial_hash = true; + + ret = crypto_wait_req(crypto_ahash_finup(req), &wait); + if (ret) + goto err_req; + } + + /* Hash opad */ + sg_init_one(&sg[0], opad, SHA256_BLOCK_SIZE); + ahash_request_set_crypt(req, sg, dest_opad, SHA256_BLOCK_SIZE); + ret = crypto_ahash_init(req); + if (ret) + goto err_req; + + /* Disable HASH_FINALIZE for opad hash */ + rctx->partial_hash = true; + + ret = 
crypto_wait_req(crypto_ahash_finup(req), &wait); + if (ret) + goto err_req; + + if (!IS_HASH_MD5(ctx_flags)) { + for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++) { + u32 *ipad_hash = (u32 *)dest_ipad; + u32 *opad_hash = (u32 *)dest_opad; + + if (!skip_ipad) + ipad_hash[i] = (u32 __force)cpu_to_be32(ipad_hash[i]); + opad_hash[i] = (u32 __force)cpu_to_be32(opad_hash[i]); + } + } + +err_req: + ahash_request_free(req); +err_ahash: + crypto_free_ahash(ahash_tfm); + + return ret; +} diff --git a/drivers/crypto/inside-secure/eip93/eip93-common.h b/drivers/crypto/inside-secure/eip93/eip93-common.h new file mode 100644 index 000000000000..80964cfa34df --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-common.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#ifndef _EIP93_COMMON_H_ +#define _EIP93_COMMON_H_ + +void *eip93_get_descriptor(struct eip93_device *eip93); +int eip93_put_descriptor(struct eip93_device *eip93, struct eip93_descriptor *desc); + +void eip93_set_sa_record(struct sa_record *sa_record, const unsigned int keylen, + const u32 flags); + +int eip93_parse_ctrl_stat_err(struct eip93_device *eip93, int err); + +int eip93_hmac_setkey(u32 ctx_flags, const u8 *key, unsigned int keylen, + unsigned int hashlen, u8 *ipad, u8 *opad, + bool skip_ipad); + +#endif /* _EIP93_COMMON_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-des.h b/drivers/crypto/inside-secure/eip93/eip93-des.h new file mode 100644 index 000000000000..74748df04acf --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-des.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_DES_H_ +#define _EIP93_DES_H_ + +extern struct eip93_alg_template eip93_alg_ecb_des; +extern struct eip93_alg_template eip93_alg_cbc_des; +extern struct eip93_alg_template eip93_alg_ecb_des3_ede; +extern struct eip93_alg_template eip93_alg_cbc_des3_ede; + +#endif /* _EIP93_DES_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.c b/drivers/crypto/inside-secure/eip93/eip93-hash.c new file mode 100644 index 000000000000..5e9627467a42 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2024 + * + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <crypto/sha1.h> +#include <crypto/sha2.h> +#include <crypto/md5.h> +#include <crypto/hmac.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> + +#include "eip93-cipher.h" +#include "eip93-hash.h" +#include "eip93-main.h" +#include "eip93-common.h" +#include "eip93-regs.h" + +static void eip93_hash_free_data_blocks(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + struct mkt_hash_block *block, *tmp; + + list_for_each_entry_safe(block, tmp, &rctx->blocks, list) { + dma_unmap_single(eip93->dev, block->data_dma, + SHA256_BLOCK_SIZE, DMA_TO_DEVICE); + kfree(block); + } + if (!list_empty(&rctx->blocks)) + INIT_LIST_HEAD(&rctx->blocks); + + if (rctx->finalize) + dma_unmap_single(eip93->dev, rctx->data_dma, + rctx->data_used, + DMA_TO_DEVICE); +} + +static void 
eip93_hash_free_sa_record(struct ahash_request *req)
+{
+	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct eip93_device *eip93 = ctx->eip93;
+
+	if (IS_HMAC(ctx->flags))
+		dma_unmap_single(eip93->dev, rctx->sa_record_hmac_base,
+				 sizeof(rctx->sa_record_hmac), DMA_TO_DEVICE);
+
+	dma_unmap_single(eip93->dev, rctx->sa_record_base,
+			 sizeof(rctx->sa_record), DMA_TO_DEVICE);
+}
+
+void eip93_hash_handle_result(struct crypto_async_request *async, int err)
+{
+	struct ahash_request *req = ahash_request_cast(async);
+	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct sa_state *sa_state = &rctx->sa_state;
+	struct eip93_device *eip93 = ctx->eip93;
+	int i;
+
+	dma_unmap_single(eip93->dev, rctx->sa_state_base,
+			 sizeof(*sa_state), DMA_FROM_DEVICE);
+
+	/*
+	 * With partial_hash, assume a SHA256_DIGEST_SIZE buffer is passed.
+	 * This is to handle SHA224, which has a 32-byte intermediate digest.
+	 */
+	if (rctx->partial_hash)
+		digestsize = SHA256_DIGEST_SIZE;
+
+	if (rctx->finalize || rctx->partial_hash) {
+		/* bytes need to be swapped for req->result */
+		if (!IS_HASH_MD5(ctx->flags)) {
+			for (i = 0; i < digestsize / sizeof(u32); i++) {
+				u32 *digest = (u32 *)sa_state->state_i_digest;
+
+				digest[i] = be32_to_cpu((__be32 __force)digest[i]);
+			}
+		}
+
+		memcpy(req->result, sa_state->state_i_digest, digestsize);
+	}
+
+	eip93_hash_free_sa_record(req);
+	eip93_hash_free_data_blocks(req);
+
+	ahash_request_complete(req, err);
+}
+
+static void eip93_hash_init_sa_state_digest(u32 hash, u8 *digest)
+{
+	u32 sha256_init[] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+			      SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 };
+	u32 sha224_init[] = { SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
+			      SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7 };
+	u32 sha1_init[] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };
+	u32 md5_init[] = { MD5_H0, MD5_H1, MD5_H2, MD5_H3 };
+
+	/* Init HASH constant */
+	switch (hash) {
+	case EIP93_HASH_SHA256:
+		memcpy(digest, sha256_init, sizeof(sha256_init));
+		return;
+	case EIP93_HASH_SHA224:
+		memcpy(digest, sha224_init, sizeof(sha224_init));
+		return;
+	case EIP93_HASH_SHA1:
+		memcpy(digest, sha1_init, sizeof(sha1_init));
+		return;
+	case EIP93_HASH_MD5:
+		memcpy(digest, md5_init, sizeof(md5_init));
+		return;
+	default: /* Impossible */
+		return;
+	}
+}
+
+static void eip93_hash_export_sa_state(struct ahash_request *req,
+				       struct eip93_hash_export_state *state)
+{
+	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+	struct sa_state *sa_state = &rctx->sa_state;
+
+	/*
+	 * EIP93 has special handling for state_byte_cnt in sa_state.
+	 * Even if a zero packet is passed (and a BADMSG is returned),
+	 * state_byte_cnt is still incremented for the digested block (by the
+	 * hash primitive). This is problematic with export/import, as EIP93
+	 * expects 0 in state_byte_cnt for the very first iteration.
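+	 * Exporting therefore stores a zeroed count whenever no data has
+	 * been consumed yet (rctx->len == 0), so that a later import hands
+	 * the engine the pristine zero count it expects.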
+ */ + if (!rctx->len) + memset(state->state_len, 0, sizeof(u32) * 2); + else + memcpy(state->state_len, sa_state->state_byte_cnt, + sizeof(u32) * 2); + memcpy(state->state_hash, sa_state->state_i_digest, + SHA256_DIGEST_SIZE); + state->len = rctx->len; + state->data_used = rctx->data_used; +} + +static void __eip93_hash_init(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + int digestsize; + + digestsize = crypto_ahash_digestsize(ahash); + + eip93_set_sa_record(sa_record, 0, ctx->flags); + sa_record->sa_cmd0_word |= EIP93_SA_CMD_HASH_FROM_STATE; + sa_record->sa_cmd0_word |= EIP93_SA_CMD_SAVE_HASH; + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_OPCODE; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_OPCODE, + EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH); + sa_record->sa_cmd0_word &= ~EIP93_SA_CMD_DIGEST_LENGTH; + sa_record->sa_cmd0_word |= FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, + digestsize / sizeof(u32)); + + /* + * HMAC special handling + * Enabling CMD_HMAC force the inner hash to be always finalized. + * This cause problems on handling message > 64 byte as we + * need to produce intermediate inner hash on sending intermediate + * 64 bytes blocks. + * + * To handle this, enable CMD_HMAC only on the last block. + * We make a duplicate of sa_record and on the last descriptor, + * we pass a dedicated sa_record with CMD_HMAC enabled to make + * EIP93 apply the outer hash. + */ + if (IS_HMAC(ctx->flags)) { + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; + + memcpy(sa_record_hmac, sa_record, sizeof(*sa_record)); + /* Copy pre-hashed opad for HMAC */ + memcpy(sa_record_hmac->sa_o_digest, ctx->opad, SHA256_DIGEST_SIZE); + + /* Disable HMAC for hash normal sa_record */ + sa_record->sa_cmd1_word &= ~EIP93_SA_CMD_HMAC; + } + + rctx->len = 0; + rctx->data_used = 0; + rctx->partial_hash = false; + rctx->finalize = false; + INIT_LIST_HEAD(&rctx->blocks); +} + +static int eip93_send_hash_req(struct crypto_async_request *async, u8 *data, + dma_addr_t *data_dma, u32 len, bool last) +{ + struct ahash_request *req = ahash_request_cast(async); + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct eip93_device *eip93 = ctx->eip93; + struct eip93_descriptor cdesc = { }; + dma_addr_t src_addr; + int ret; + + /* Map block data to DMA */ + src_addr = dma_map_single(eip93->dev, data, len, DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, src_addr); + if (ret) + return ret; + + cdesc.pe_ctrl_stat_word = FIELD_PREP(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, + EIP93_PE_CTRL_HOST_READY); + cdesc.sa_addr = rctx->sa_record_base; + cdesc.arc4_addr = 0; + + cdesc.state_addr = rctx->sa_state_base; + cdesc.src_addr = src_addr; + cdesc.pe_length_word = FIELD_PREP(EIP93_PE_LENGTH_HOST_PE_READY, + EIP93_PE_LENGTH_HOST_READY); + cdesc.pe_length_word |= FIELD_PREP(EIP93_PE_LENGTH_LENGTH, + len); + + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_HASH); + + if (last) { + int crypto_async_idr; + + if (rctx->finalize && !rctx->partial_hash) { + /* For last block, pass sa_record with CMD_HMAC enabled */ + if (IS_HMAC(ctx->flags)) { + struct sa_record *sa_record_hmac = &rctx->sa_record_hmac; + + rctx->sa_record_hmac_base = dma_map_single(eip93->dev, + sa_record_hmac, + 
sizeof(*sa_record_hmac), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_hmac_base); + if (ret) + return ret; + + cdesc.sa_addr = rctx->sa_record_hmac_base; + } + + cdesc.pe_ctrl_stat_word |= EIP93_PE_CTRL_PE_HASH_FINAL; + } + + scoped_guard(spinlock_bh, &eip93->ring->idr_lock) + crypto_async_idr = idr_alloc(&eip93->ring->crypto_async_idr, async, 0, + EIP93_RING_NUM - 1, GFP_ATOMIC); + + cdesc.user_id |= FIELD_PREP(EIP93_PE_USER_ID_CRYPTO_IDR, (u16)crypto_async_idr) | + FIELD_PREP(EIP93_PE_USER_ID_DESC_FLAGS, EIP93_DESC_LAST); + } + +again: + ret = eip93_put_descriptor(eip93, &cdesc); + if (ret) { + usleep_range(EIP93_RING_BUSY_DELAY, + EIP93_RING_BUSY_DELAY * 2); + goto again; + } + + /* Writing new descriptor count starts DMA action */ + writel(1, eip93->base + EIP93_REG_PE_CD_COUNT); + + *data_dma = src_addr; + return 0; +} + +static int eip93_hash_init(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_state *sa_state = &rctx->sa_state; + + memset(sa_state->state_byte_cnt, 0, sizeof(u32) * 2); + eip93_hash_init_sa_state_digest(ctx->flags & EIP93_HASH_MASK, + sa_state->state_i_digest); + + __eip93_hash_init(req); + + /* For HMAC setup the initial block for ipad */ + if (IS_HMAC(ctx->flags)) { + memcpy(rctx->data, ctx->ipad, SHA256_BLOCK_SIZE); + + rctx->data_used = SHA256_BLOCK_SIZE; + rctx->len += SHA256_BLOCK_SIZE; + } + + return 0; +} + +/* + * With complete_req true, we wait for the engine to consume all the block in list, + * else we just queue the block to the engine as final() will wait. This is useful + * for finup(). + */ +static int __eip93_hash_update(struct ahash_request *req, bool complete_req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_async_request *async = &req->base; + unsigned int read, to_consume = req->nbytes; + unsigned int max_read, consumed = 0; + struct mkt_hash_block *block; + bool wait_req = false; + int offset; + int ret; + + /* Get the offset and available space to fill req data */ + offset = rctx->data_used; + max_read = SHA256_BLOCK_SIZE - offset; + + /* Consume req in block of SHA256_BLOCK_SIZE. + * to_read is initially set to space available in the req data + * and then reset to SHA256_BLOCK_SIZE. + */ + while (to_consume > max_read) { + block = kzalloc(sizeof(*block), GFP_ATOMIC); + if (!block) { + ret = -ENOMEM; + goto free_blocks; + } + + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), + block->data + offset, + max_read, consumed); + + /* + * For first iteration only, copy req data to block + * and reset offset and max_read for next iteration. 
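+		 * Example: with 40 bytes already buffered (offset = 40) and a
+		 * 100-byte request, the first block merges the 40 buffered
+		 * bytes with 24 request bytes, the next block consumes 64
+		 * more, and the remaining 12 bytes stay in rctx->data for a
+		 * later update() or final().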
+ */ + if (offset > 0) { + memcpy(block->data, rctx->data, offset); + offset = 0; + max_read = SHA256_BLOCK_SIZE; + } + + list_add(&block->list, &rctx->blocks); + to_consume -= read; + consumed += read; + } + + /* Write the remaining data to req data */ + read = sg_pcopy_to_buffer(req->src, sg_nents(req->src), + rctx->data + offset, to_consume, + consumed); + rctx->data_used = offset + read; + + /* Update counter with processed bytes */ + rctx->len += read + consumed; + + /* Consume all the block added to list */ + list_for_each_entry_reverse(block, &rctx->blocks, list) { + wait_req = complete_req && + list_is_first(&block->list, &rctx->blocks); + + ret = eip93_send_hash_req(async, block->data, + &block->data_dma, + SHA256_BLOCK_SIZE, wait_req); + if (ret) + goto free_blocks; + } + + return wait_req ? -EINPROGRESS : 0; + +free_blocks: + eip93_hash_free_data_blocks(req); + + return ret; +} + +static int eip93_hash_update(struct ahash_request *req) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); + struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash); + struct sa_record *sa_record = &rctx->sa_record; + struct sa_state *sa_state = &rctx->sa_state; + struct eip93_device *eip93 = ctx->eip93; + int ret; + + if (!req->nbytes) + return 0; + + rctx->sa_state_base = dma_map_single(eip93->dev, sa_state, + sizeof(*sa_state), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_state_base); + if (ret) + return ret; + + rctx->sa_record_base = dma_map_single(eip93->dev, sa_record, + sizeof(*sa_record), + DMA_TO_DEVICE); + ret = dma_mapping_error(eip93->dev, rctx->sa_record_base); + if (ret) + goto free_sa_state; + + ret = __eip93_hash_update(req, true); + if (ret && ret != -EINPROGRESS) + goto free_sa_record; + + return ret; + +free_sa_record: + dma_unmap_single(eip93->dev, rctx->sa_record_base, + sizeof(*sa_record), DMA_TO_DEVICE); + +free_sa_state: + dma_unmap_single(eip93->dev, rctx->sa_state_base, + sizeof(*sa_state), DMA_TO_DEVICE); + + return ret; +} + +/* + * With map_data true, we map the sa_record and sa_state. 
This is needed
+ * for finup() as they are mapped before calling update()
+ */
+static int __eip93_hash_final(struct ahash_request *req, bool map_dma)
+{
+	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct crypto_async_request *async = &req->base;
+	struct sa_record *sa_record = &rctx->sa_record;
+	struct sa_state *sa_state = &rctx->sa_state;
+	struct eip93_device *eip93 = ctx->eip93;
+	int ret;
+
+	/* EIP93 can't handle a zero-byte hash */
+	if (!rctx->len && !IS_HMAC(ctx->flags)) {
+		switch ((ctx->flags & EIP93_HASH_MASK)) {
+		case EIP93_HASH_SHA256:
+			memcpy(req->result, sha256_zero_message_hash,
+			       SHA256_DIGEST_SIZE);
+			break;
+		case EIP93_HASH_SHA224:
+			memcpy(req->result, sha224_zero_message_hash,
+			       SHA224_DIGEST_SIZE);
+			break;
+		case EIP93_HASH_SHA1:
+			memcpy(req->result, sha1_zero_message_hash,
+			       SHA1_DIGEST_SIZE);
+			break;
+		case EIP93_HASH_MD5:
+			memcpy(req->result, md5_zero_message_hash,
+			       MD5_DIGEST_SIZE);
+			break;
+		default: /* Impossible */
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	/* Signal that the engine interrupt is for the last block */
+	rctx->finalize = true;
+
+	if (map_dma) {
+		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+						     sizeof(*sa_state),
+						     DMA_TO_DEVICE);
+		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+		if (ret)
+			return ret;
+
+		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+						      sizeof(*sa_record),
+						      DMA_TO_DEVICE);
+		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+		if (ret)
+			goto free_sa_state;
+	}
+
+	/* Send last block */
+	ret = eip93_send_hash_req(async, rctx->data, &rctx->data_dma,
+				  rctx->data_used, true);
+	if (ret)
+		goto free_blocks;
+
+	return -EINPROGRESS;
+
+free_blocks:
+	eip93_hash_free_data_blocks(req);
+
+	dma_unmap_single(eip93->dev, rctx->sa_record_base,
+			 sizeof(*sa_record), DMA_TO_DEVICE);
+
+free_sa_state:
+	dma_unmap_single(eip93->dev, rctx->sa_state_base,
+			 sizeof(*sa_state), DMA_TO_DEVICE);
+
+	return ret;
+}
+
+static int eip93_hash_final(struct ahash_request *req)
+{
+	return __eip93_hash_final(req, true);
+}
+
+static int eip93_hash_finup(struct ahash_request *req)
+{
+	struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req);
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct eip93_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct sa_record *sa_record = &rctx->sa_record;
+	struct sa_state *sa_state = &rctx->sa_state;
+	struct eip93_device *eip93 = ctx->eip93;
+	int ret;
+
+	if (rctx->len + req->nbytes || IS_HMAC(ctx->flags)) {
+		rctx->sa_state_base = dma_map_single(eip93->dev, sa_state,
+						     sizeof(*sa_state),
+						     DMA_TO_DEVICE);
+		ret = dma_mapping_error(eip93->dev, rctx->sa_state_base);
+		if (ret)
+			return ret;
+
+		rctx->sa_record_base = dma_map_single(eip93->dev, sa_record,
+						      sizeof(*sa_record),
+						      DMA_TO_DEVICE);
+		ret = dma_mapping_error(eip93->dev, rctx->sa_record_base);
+		if (ret)
+			goto free_sa_state;
+
+		ret = __eip93_hash_update(req, false);
+		if (ret)
+			goto free_sa_record;
+	}
+
+	return __eip93_hash_final(req, false);
+
+free_sa_record:
+	dma_unmap_single(eip93->dev, rctx->sa_record_base,
+			 sizeof(*sa_record), DMA_TO_DEVICE);
+free_sa_state:
+	dma_unmap_single(eip93->dev, rctx->sa_state_base,
+			 sizeof(*sa_state), DMA_TO_DEVICE);
+
+	return ret;
+}
+
+static int eip93_hash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
+				  u32 keylen)
+{
+	unsigned int digestsize = crypto_ahash_digestsize(ahash);
+	struct crypto_tfm *tfm = 
crypto_ahash_tfm(ahash); + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); + + return eip93_hmac_setkey(ctx->flags, key, keylen, digestsize, + ctx->ipad, ctx->opad, true); +} + +static int eip93_hash_cra_init(struct crypto_tfm *tfm) +{ + struct eip93_hash_ctx *ctx = crypto_tfm_ctx(tfm); + struct eip93_alg_template *tmpl = container_of(tfm->__crt_alg, + struct eip93_alg_template, alg.ahash.halg.base); + + crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), + sizeof(struct eip93_hash_reqctx)); + + ctx->eip93 = tmpl->eip93; + ctx->flags = tmpl->flags; + + return 0; +} + +static int eip93_hash_digest(struct ahash_request *req) +{ + int ret; + + ret = eip93_hash_init(req); + if (ret) + return ret; + + return eip93_hash_finup(req); +} + +static int eip93_hash_import(struct ahash_request *req, const void *in) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + const struct eip93_hash_export_state *state = in; + struct sa_state *sa_state = &rctx->sa_state; + + memcpy(sa_state->state_byte_cnt, state->state_len, sizeof(u32) * 2); + memcpy(sa_state->state_i_digest, state->state_hash, SHA256_DIGEST_SIZE); + + __eip93_hash_init(req); + + rctx->len = state->len; + rctx->data_used = state->data_used; + + /* Skip copying data if we have nothing to copy */ + if (rctx->len) + memcpy(rctx->data, state->data, rctx->data_used); + + return 0; +} + +static int eip93_hash_export(struct ahash_request *req, void *out) +{ + struct eip93_hash_reqctx *rctx = ahash_request_ctx_dma(req); + struct eip93_hash_export_state *state = out; + + /* Save the first block in state data */ + if (rctx->len) + memcpy(state->data, rctx->data, rctx->data_used); + + eip93_hash_export_sa_state(req, state); + + return 0; +} + +struct eip93_alg_template eip93_alg_md5 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_MD5, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "md5", + .cra_driver_name = "md5-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha1 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA1, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha224 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA224, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + 
.digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_sha256 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_SHA256, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-eip93", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_md5 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_MD5, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = MD5_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(md5)", + .cra_driver_name = "hmac(md5-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = MD5_HMAC_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha1 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA1, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac(sha1-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha224 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA224, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA224_DIGEST_SIZE, + .statesize = sizeof(struct 
eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "hmac(sha224-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA224_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; + +struct eip93_alg_template eip93_alg_hmac_sha256 = { + .type = EIP93_ALG_TYPE_HASH, + .flags = EIP93_HASH_HMAC | EIP93_HASH_SHA256, + .alg.ahash = { + .init = eip93_hash_init, + .update = eip93_hash_update, + .final = eip93_hash_final, + .finup = eip93_hash_finup, + .digest = eip93_hash_digest, + .setkey = eip93_hash_hmac_setkey, + .export = eip93_hash_export, + .import = eip93_hash_import, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct eip93_hash_export_state), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac(sha256-eip93)", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_ALLOCATES_MEMORY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct eip93_hash_ctx), + .cra_init = eip93_hash_cra_init, + .cra_module = THIS_MODULE, + }, + }, + }, +}; diff --git a/drivers/crypto/inside-secure/eip93/eip93-hash.h b/drivers/crypto/inside-secure/eip93/eip93-hash.h new file mode 100644 index 000000000000..556f22fc1dd0 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-hash.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_HASH_H_ +#define _EIP93_HASH_H_ + +#include <crypto/sha2.h> + +#include "eip93-main.h" +#include "eip93-regs.h" + +struct eip93_hash_ctx { + struct eip93_device *eip93; + u32 flags; + + u8 ipad[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + u8 opad[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); +}; + +struct eip93_hash_reqctx { + /* Placement is important for DMA align */ + struct { + struct sa_record sa_record; + struct sa_record sa_record_hmac; + struct sa_state sa_state; + } __aligned(CRYPTO_DMA_ALIGN); + + dma_addr_t sa_record_base; + dma_addr_t sa_record_hmac_base; + dma_addr_t sa_state_base; + + /* Don't enable HASH_FINALIZE when last block is sent */ + bool partial_hash; + + /* Set to signal interrupt is for final packet */ + bool finalize; + + /* + * EIP93 requires data to be accumulated in block of 64 bytes + * for intermediate hash calculation. 
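+	 * len counts every message byte accepted so far (including bytes
+	 * still buffered), while data_used tracks how much of the 64-byte
+	 * staging buffer below is filled but not yet submitted.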
+ */ + u64 len; + u32 data_used; + + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + dma_addr_t data_dma; + + struct list_head blocks; +}; + +struct mkt_hash_block { + struct list_head list; + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); + dma_addr_t data_dma; +}; + +struct eip93_hash_export_state { + u64 len; + u32 data_used; + + u32 state_len[2]; + u8 state_hash[SHA256_DIGEST_SIZE] __aligned(sizeof(u32)); + + u8 data[SHA256_BLOCK_SIZE] __aligned(sizeof(u32)); +}; + +void eip93_hash_handle_result(struct crypto_async_request *async, int err); + +extern struct eip93_alg_template eip93_alg_md5; +extern struct eip93_alg_template eip93_alg_sha1; +extern struct eip93_alg_template eip93_alg_sha224; +extern struct eip93_alg_template eip93_alg_sha256; +extern struct eip93_alg_template eip93_alg_hmac_md5; +extern struct eip93_alg_template eip93_alg_hmac_sha1; +extern struct eip93_alg_template eip93_alg_hmac_sha224; +extern struct eip93_alg_template eip93_alg_hmac_sha256; + +#endif /* _EIP93_HASH_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.c b/drivers/crypto/inside-secure/eip93/eip93-main.c new file mode 100644 index 000000000000..0b38a567da0e --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-main.c @@ -0,0 +1,501 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ + +#include <linux/atomic.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <crypto/aes.h> +#include <crypto/ctr.h> + +#include "eip93-main.h" +#include "eip93-regs.h" +#include "eip93-common.h" +#include "eip93-cipher.h" +#include "eip93-aes.h" +#include "eip93-des.h" +#include "eip93-aead.h" +#include "eip93-hash.h" + +static struct eip93_alg_template *eip93_algs[] = { + &eip93_alg_ecb_des, + &eip93_alg_cbc_des, + &eip93_alg_ecb_des3_ede, + &eip93_alg_cbc_des3_ede, + &eip93_alg_ecb_aes, + &eip93_alg_cbc_aes, + &eip93_alg_ctr_aes, + &eip93_alg_rfc3686_aes, + &eip93_alg_authenc_hmac_md5_cbc_des, + &eip93_alg_authenc_hmac_sha1_cbc_des, + &eip93_alg_authenc_hmac_sha224_cbc_des, + &eip93_alg_authenc_hmac_sha256_cbc_des, + &eip93_alg_authenc_hmac_md5_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha1_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha224_cbc_des3_ede, + &eip93_alg_authenc_hmac_sha256_cbc_des3_ede, + &eip93_alg_authenc_hmac_md5_cbc_aes, + &eip93_alg_authenc_hmac_sha1_cbc_aes, + &eip93_alg_authenc_hmac_sha224_cbc_aes, + &eip93_alg_authenc_hmac_sha256_cbc_aes, + &eip93_alg_authenc_hmac_md5_rfc3686_aes, + &eip93_alg_authenc_hmac_sha1_rfc3686_aes, + &eip93_alg_authenc_hmac_sha224_rfc3686_aes, + &eip93_alg_authenc_hmac_sha256_rfc3686_aes, + &eip93_alg_md5, + &eip93_alg_sha1, + &eip93_alg_sha224, + &eip93_alg_sha256, + &eip93_alg_hmac_md5, + &eip93_alg_hmac_sha1, + &eip93_alg_hmac_sha224, + &eip93_alg_hmac_sha256, +}; + +inline void eip93_irq_disable(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_MASK_DISABLE); +} + +inline void eip93_irq_enable(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_MASK_ENABLE); +} + +inline void eip93_irq_clear(struct eip93_device *eip93, u32 mask) +{ + __raw_writel(mask, eip93->base + EIP93_REG_INT_CLR); +} + +static void eip93_unregister_algs(unsigned int i) +{ + unsigned int j; + + for (j = 
0; j < i; j++) {
+		switch (eip93_algs[j]->type) {
+		case EIP93_ALG_TYPE_SKCIPHER:
+			crypto_unregister_skcipher(&eip93_algs[j]->alg.skcipher);
+			break;
+		case EIP93_ALG_TYPE_AEAD:
+			crypto_unregister_aead(&eip93_algs[j]->alg.aead);
+			break;
+		case EIP93_ALG_TYPE_HASH:
+			crypto_unregister_ahash(&eip93_algs[j]->alg.ahash);
+			break;
+		}
+	}
+}
+
+static int eip93_register_algs(struct eip93_device *eip93, u32 supported_algo_flags)
+{
+	unsigned int i;
+	int ret = 0;
+
+	for (i = 0; i < ARRAY_SIZE(eip93_algs); i++) {
+		u32 alg_flags = eip93_algs[i]->flags;
+
+		eip93_algs[i]->eip93 = eip93;
+
+		if ((IS_DES(alg_flags) || IS_3DES(alg_flags)) &&
+		    !(supported_algo_flags & EIP93_PE_OPTION_TDES))
+			continue;
+
+		if (IS_AES(alg_flags)) {
+			if (!(supported_algo_flags & EIP93_PE_OPTION_AES))
+				continue;
+
+			if (!IS_HMAC(alg_flags)) {
+				if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY128)
+					eip93_algs[i]->alg.skcipher.max_keysize =
+						AES_KEYSIZE_128;
+
+				if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY192)
+					eip93_algs[i]->alg.skcipher.max_keysize =
+						AES_KEYSIZE_192;
+
+				if (supported_algo_flags & EIP93_PE_OPTION_AES_KEY256)
+					eip93_algs[i]->alg.skcipher.max_keysize =
+						AES_KEYSIZE_256;
+
+				if (IS_RFC3686(alg_flags))
+					eip93_algs[i]->alg.skcipher.max_keysize +=
+						CTR_RFC3686_NONCE_SIZE;
+			}
+		}
+
+		if (IS_HASH_MD5(alg_flags) &&
+		    !(supported_algo_flags & EIP93_PE_OPTION_MD5))
+			continue;
+
+		if (IS_HASH_SHA1(alg_flags) &&
+		    !(supported_algo_flags & EIP93_PE_OPTION_SHA_1))
+			continue;
+
+		if (IS_HASH_SHA224(alg_flags) &&
+		    !(supported_algo_flags & EIP93_PE_OPTION_SHA_224))
+			continue;
+
+		if (IS_HASH_SHA256(alg_flags) &&
+		    !(supported_algo_flags & EIP93_PE_OPTION_SHA_256))
+			continue;
+
+		switch (eip93_algs[i]->type) {
+		case EIP93_ALG_TYPE_SKCIPHER:
+			ret = crypto_register_skcipher(&eip93_algs[i]->alg.skcipher);
+			break;
+		case EIP93_ALG_TYPE_AEAD:
+			ret = crypto_register_aead(&eip93_algs[i]->alg.aead);
+			break;
+		case EIP93_ALG_TYPE_HASH:
+			ret = crypto_register_ahash(&eip93_algs[i]->alg.ahash);
+			break;
+		}
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+
+fail:
+	eip93_unregister_algs(i);
+
+	return ret;
+}
+
+static void eip93_handle_result_descriptor(struct eip93_device *eip93)
+{
+	struct crypto_async_request *async;
+	struct eip93_descriptor *rdesc;
+	u16 desc_flags, crypto_idr;
+	bool last_entry;
+	int handled, left, err;
+	u32 pe_ctrl_stat;
+	u32 pe_length;
+
+get_more:
+	handled = 0;
+
+	left = readl(eip93->base + EIP93_REG_PE_RD_COUNT) & EIP93_PE_RD_COUNT;
+
+	if (!left) {
+		eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);
+		eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH);
+		return;
+	}
+
+	last_entry = false;
+
+	while (left) {
+		scoped_guard(spinlock_irqsave, &eip93->ring->read_lock)
+			rdesc = eip93_get_descriptor(eip93);
+		if (IS_ERR(rdesc)) {
+			dev_err(eip93->dev, "Ndesc: %d nreq: %d\n",
+				handled, left);
+			err = -EIO;
+			break;
+		}
+		/* make sure DMA is finished writing */
+		do {
+			pe_ctrl_stat = READ_ONCE(rdesc->pe_ctrl_stat_word);
+			pe_length = READ_ONCE(rdesc->pe_length_word);
+		} while (FIELD_GET(EIP93_PE_CTRL_PE_READY_DES_TRING_OWN, pe_ctrl_stat) !=
+			 EIP93_PE_CTRL_PE_READY ||
+			 FIELD_GET(EIP93_PE_LENGTH_HOST_PE_READY, pe_length) !=
+			 EIP93_PE_LENGTH_PE_READY);
+
+		err = rdesc->pe_ctrl_stat_word & (EIP93_PE_CTRL_PE_EXT_ERR_CODE |
+						  EIP93_PE_CTRL_PE_EXT_ERR |
+						  EIP93_PE_CTRL_PE_SEQNUM_ERR |
+						  EIP93_PE_CTRL_PE_PAD_ERR |
+						  EIP93_PE_CTRL_PE_AUTH_ERR);
+
+		desc_flags = FIELD_GET(EIP93_PE_USER_ID_DESC_FLAGS, rdesc->user_id);
+		crypto_idr = FIELD_GET(EIP93_PE_USER_ID_CRYPTO_IDR, rdesc->user_id);
+
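+		/*
+		 * Hand the consumed result descriptor back to the engine
+		 * and ack the threshold interrupt before polling for more.
+		 */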
+		writel(1, eip93->base + EIP93_REG_PE_RD_COUNT);
+		eip93_irq_clear(eip93, EIP93_INT_RDR_THRESH);
+
+		handled++;
+		left--;
+
+		if (desc_flags & EIP93_DESC_LAST) {
+			last_entry = true;
+			break;
+		}
+	}
+
+	if (!last_entry)
+		goto get_more;
+
+	/* Get crypto async ref only for last descriptor */
+	scoped_guard(spinlock_bh, &eip93->ring->idr_lock) {
+		async = idr_find(&eip93->ring->crypto_async_idr, crypto_idr);
+		idr_remove(&eip93->ring->crypto_async_idr, crypto_idr);
+	}
+
+	/* Parse error in ctrl stat word */
+	err = eip93_parse_ctrl_stat_err(eip93, err);
+
+	if (desc_flags & EIP93_DESC_SKCIPHER)
+		eip93_skcipher_handle_result(async, err);
+
+	if (desc_flags & EIP93_DESC_AEAD)
+		eip93_aead_handle_result(async, err);
+
+	if (desc_flags & EIP93_DESC_HASH)
+		eip93_hash_handle_result(async, err);
+
+	goto get_more;
+}
+
+static void eip93_done_task(unsigned long data)
+{
+	struct eip93_device *eip93 = (struct eip93_device *)data;
+
+	eip93_handle_result_descriptor(eip93);
+}
+
+static irqreturn_t eip93_irq_handler(int irq, void *data)
+{
+	struct eip93_device *eip93 = data;
+	u32 irq_status;
+
+	irq_status = readl(eip93->base + EIP93_REG_INT_MASK_STAT);
+	if (FIELD_GET(EIP93_INT_RDR_THRESH, irq_status)) {
+		eip93_irq_disable(eip93, EIP93_INT_RDR_THRESH);
+		tasklet_schedule(&eip93->ring->done_task);
+		return IRQ_HANDLED;
+	}
+
+	/* Ignore errors in AUTO mode, handled by the RDR */
+	eip93_irq_clear(eip93, irq_status);
+	if (irq_status)
+		eip93_irq_disable(eip93, irq_status);
+
+	return IRQ_NONE;
+}
+
+static void eip93_initialize(struct eip93_device *eip93, u32 supported_algo_flags)
+{
+	u32 val;
+
+	/* Reset PE and rings */
+	val = EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING;
+	val |= EIP93_PE_TARGET_AUTO_RING_MODE;
+	/* For Auto mode, update the CDR ring owner after processing */
+	val |= EIP93_PE_CONFIG_EN_CDR_UPDATE;
+	writel(val, eip93->base + EIP93_REG_PE_CONFIG);
+
+	/* Wait for PE and ring to reset */
+	usleep_range(10, 20);
+
+	/* Release PE and ring reset */
+	val = readl(eip93->base + EIP93_REG_PE_CONFIG);
+	val &= ~(EIP93_PE_CONFIG_RST_PE | EIP93_PE_CONFIG_RST_RING);
+	writel(val, eip93->base + EIP93_REG_PE_CONFIG);
+
+	/* Config Clocks */
+	val = EIP93_PE_CLOCK_EN_PE_CLK;
+	if (supported_algo_flags & EIP93_PE_OPTION_TDES)
+		val |= EIP93_PE_CLOCK_EN_DES_CLK;
+	if (supported_algo_flags & EIP93_PE_OPTION_AES)
+		val |= EIP93_PE_CLOCK_EN_AES_CLK;
+	if (supported_algo_flags &
+	    (EIP93_PE_OPTION_MD5 | EIP93_PE_OPTION_SHA_1 | EIP93_PE_OPTION_SHA_224 |
+	     EIP93_PE_OPTION_SHA_256))
+		val |= EIP93_PE_CLOCK_EN_HASH_CLK;
+	writel(val, eip93->base + EIP93_REG_PE_CLOCK_CTRL);
+
+	/* Config DMA thresholds */
+	val = FIELD_PREP(EIP93_PE_OUTBUF_THRESH, 128) |
+	      FIELD_PREP(EIP93_PE_INBUF_THRESH, 128);
+	writel(val, eip93->base + EIP93_REG_PE_BUF_THRESH);
+
+	/* Clear/ack all interrupts before disable all */
+	eip93_irq_clear(eip93, EIP93_INT_ALL);
+	eip93_irq_disable(eip93, EIP93_INT_ALL);
+
+	/* Setup CDR threshold to trigger interrupt */
+	val = FIELD_PREP(EIPR93_PE_CDR_THRESH, EIP93_RING_NUM - EIP93_RING_BUSY);
+	/*
+	 * Configure RDR interrupt to be triggered if RD counter is not 0
+	 * for more than 2^(N+10) system clocks.
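+	 * With the value 5 programmed below that is 2^15 = 32768 clocks,
+	 * i.e. roughly 100 us at an assumed ~300 MHz engine clock.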
+ */ + val |= FIELD_PREP(EIPR93_PE_RD_TIMEOUT, 5) | EIPR93_PE_TIMEROUT_EN; + writel(val, eip93->base + EIP93_REG_PE_RING_THRESH); +} + +static void eip93_desc_free(struct eip93_device *eip93) +{ + writel(0, eip93->base + EIP93_REG_PE_RING_CONFIG); + writel(0, eip93->base + EIP93_REG_PE_CDR_BASE); + writel(0, eip93->base + EIP93_REG_PE_RDR_BASE); +} + +static int eip93_set_ring(struct eip93_device *eip93, struct eip93_desc_ring *ring) +{ + ring->offset = sizeof(struct eip93_descriptor); + ring->base = dmam_alloc_coherent(eip93->dev, + sizeof(struct eip93_descriptor) * EIP93_RING_NUM, + &ring->base_dma, GFP_KERNEL); + if (!ring->base) + return -ENOMEM; + + ring->write = ring->base; + ring->base_end = ring->base + sizeof(struct eip93_descriptor) * (EIP93_RING_NUM - 1); + ring->read = ring->base; + + return 0; +} + +static int eip93_desc_init(struct eip93_device *eip93) +{ + struct eip93_desc_ring *cdr = &eip93->ring->cdr; + struct eip93_desc_ring *rdr = &eip93->ring->rdr; + int ret; + u32 val; + + ret = eip93_set_ring(eip93, cdr); + if (ret) + return ret; + + ret = eip93_set_ring(eip93, rdr); + if (ret) + return ret; + + writel((u32 __force)cdr->base_dma, eip93->base + EIP93_REG_PE_CDR_BASE); + writel((u32 __force)rdr->base_dma, eip93->base + EIP93_REG_PE_RDR_BASE); + + val = FIELD_PREP(EIP93_PE_RING_SIZE, EIP93_RING_NUM - 1); + writel(val, eip93->base + EIP93_REG_PE_RING_CONFIG); + + return 0; +} + +static void eip93_cleanup(struct eip93_device *eip93) +{ + tasklet_kill(&eip93->ring->done_task); + + /* Clear/ack all interrupts before disable all */ + eip93_irq_clear(eip93, EIP93_INT_ALL); + eip93_irq_disable(eip93, EIP93_INT_ALL); + + writel(0, eip93->base + EIP93_REG_PE_CLOCK_CTRL); + + eip93_desc_free(eip93); + + idr_destroy(&eip93->ring->crypto_async_idr); +} + +static int eip93_crypto_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct eip93_device *eip93; + u32 ver, algo_flags; + int ret; + + eip93 = devm_kzalloc(dev, sizeof(*eip93), GFP_KERNEL); + if (!eip93) + return -ENOMEM; + + eip93->dev = dev; + platform_set_drvdata(pdev, eip93); + + eip93->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(eip93->base)) + return PTR_ERR(eip93->base); + + eip93->irq = platform_get_irq(pdev, 0); + if (eip93->irq < 0) + return eip93->irq; + + ret = devm_request_threaded_irq(eip93->dev, eip93->irq, eip93_irq_handler, + NULL, IRQF_ONESHOT, + dev_name(eip93->dev), eip93); + + eip93->ring = devm_kcalloc(eip93->dev, 1, sizeof(*eip93->ring), GFP_KERNEL); + if (!eip93->ring) + return -ENOMEM; + + ret = eip93_desc_init(eip93); + + if (ret) + return ret; + + tasklet_init(&eip93->ring->done_task, eip93_done_task, (unsigned long)eip93); + + spin_lock_init(&eip93->ring->read_lock); + spin_lock_init(&eip93->ring->write_lock); + + spin_lock_init(&eip93->ring->idr_lock); + idr_init(&eip93->ring->crypto_async_idr); + + algo_flags = readl(eip93->base + EIP93_REG_PE_OPTION_1); + + eip93_initialize(eip93, algo_flags); + + /* Init finished, enable RDR interrupt */ + eip93_irq_enable(eip93, EIP93_INT_RDR_THRESH); + + ret = eip93_register_algs(eip93, algo_flags); + if (ret) { + eip93_cleanup(eip93); + return ret; + } + + ver = readl(eip93->base + EIP93_REG_PE_REVISION); + /* EIP_EIP_NO:MAJOR_HW_REV:MINOR_HW_REV:HW_PATCH,PE(ALGO_FLAGS) */ + dev_info(eip93->dev, "EIP%lu:%lx:%lx:%lx,PE(0x%x:0x%x)\n", + FIELD_GET(EIP93_PE_REVISION_EIP_NO, ver), + FIELD_GET(EIP93_PE_REVISION_MAJ_HW_REV, ver), + FIELD_GET(EIP93_PE_REVISION_MIN_HW_REV, ver), + FIELD_GET(EIP93_PE_REVISION_HW_PATCH, ver), 
+ algo_flags, + readl(eip93->base + EIP93_REG_PE_OPTION_0)); + + return 0; +} + +static void eip93_crypto_remove(struct platform_device *pdev) +{ + struct eip93_device *eip93 = platform_get_drvdata(pdev); + + eip93_unregister_algs(ARRAY_SIZE(eip93_algs)); + eip93_cleanup(eip93); +} + +static const struct of_device_id eip93_crypto_of_match[] = { + { .compatible = "inside-secure,safexcel-eip93i", }, + { .compatible = "inside-secure,safexcel-eip93ie", }, + { .compatible = "inside-secure,safexcel-eip93is", }, + { .compatible = "inside-secure,safexcel-eip93ies", }, + /* IW not supported currently, missing AES-XCB-MAC/AES-CCM */ + /* { .compatible = "inside-secure,safexcel-eip93iw", }, */ + {} +}; +MODULE_DEVICE_TABLE(of, eip93_crypto_of_match); + +static struct platform_driver eip93_crypto_driver = { + .probe = eip93_crypto_probe, + .remove = eip93_crypto_remove, + .driver = { + .name = "inside-secure-eip93", + .of_match_table = eip93_crypto_of_match, + }, +}; +module_platform_driver(eip93_crypto_driver); + +MODULE_AUTHOR("Richard van Schagen <vschagen@cs.com>"); +MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>"); +MODULE_DESCRIPTION("Mediatek EIP-93 crypto engine driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/crypto/inside-secure/eip93/eip93-main.h b/drivers/crypto/inside-secure/eip93/eip93-main.h new file mode 100644 index 000000000000..79b078f0e5da --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-main.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef _EIP93_MAIN_H_ +#define _EIP93_MAIN_H_ + +#include <crypto/internal/aead.h> +#include <crypto/internal/hash.h> +#include <crypto/internal/skcipher.h> +#include <linux/bitfield.h> +#include <linux/interrupt.h> + +#define EIP93_RING_BUSY_DELAY 500 + +#define EIP93_RING_NUM 512 +#define EIP93_RING_BUSY 32 +#define EIP93_CRA_PRIORITY 1500 + +#define EIP93_RING_SA_STATE_ADDR(base, idx) ((base) + (idx)) +#define EIP93_RING_SA_STATE_DMA(dma_base, idx) ((u32 __force)(dma_base) + \ + ((idx) * sizeof(struct sa_state))) + +/* cipher algorithms */ +#define EIP93_ALG_DES BIT(0) +#define EIP93_ALG_3DES BIT(1) +#define EIP93_ALG_AES BIT(2) +#define EIP93_ALG_MASK GENMASK(2, 0) +/* hash and hmac algorithms */ +#define EIP93_HASH_MD5 BIT(3) +#define EIP93_HASH_SHA1 BIT(4) +#define EIP93_HASH_SHA224 BIT(5) +#define EIP93_HASH_SHA256 BIT(6) +#define EIP93_HASH_HMAC BIT(7) +#define EIP93_HASH_MASK GENMASK(6, 3) +/* cipher modes */ +#define EIP93_MODE_CBC BIT(8) +#define EIP93_MODE_ECB BIT(9) +#define EIP93_MODE_CTR BIT(10) +#define EIP93_MODE_RFC3686 BIT(11) +#define EIP93_MODE_MASK GENMASK(10, 8) + +/* cipher encryption/decryption operations */ +#define EIP93_ENCRYPT BIT(12) +#define EIP93_DECRYPT BIT(13) + +#define EIP93_BUSY BIT(14) + +/* descriptor flags */ +#define EIP93_DESC_DMA_IV BIT(0) +#define EIP93_DESC_IPSEC BIT(1) +#define EIP93_DESC_FINISH BIT(2) +#define EIP93_DESC_LAST BIT(3) +#define EIP93_DESC_FAKE_HMAC BIT(4) +#define EIP93_DESC_PRNG BIT(5) +#define EIP93_DESC_HASH BIT(6) +#define EIP93_DESC_AEAD BIT(7) +#define EIP93_DESC_SKCIPHER BIT(8) +#define EIP93_DESC_ASYNC BIT(9) + +#define IS_DMA_IV(desc_flags) ((desc_flags) & EIP93_DESC_DMA_IV) + +#define IS_DES(flags) ((flags) & EIP93_ALG_DES) +#define IS_3DES(flags) ((flags) & EIP93_ALG_3DES) +#define IS_AES(flags) ((flags) & EIP93_ALG_AES) + +#define IS_HASH_MD5(flags) ((flags) & EIP93_HASH_MD5) +#define IS_HASH_SHA1(flags) 
((flags) & EIP93_HASH_SHA1) +#define IS_HASH_SHA224(flags) ((flags) & EIP93_HASH_SHA224) +#define IS_HASH_SHA256(flags) ((flags) & EIP93_HASH_SHA256) +#define IS_HMAC(flags) ((flags) & EIP93_HASH_HMAC) + +#define IS_CBC(mode) ((mode) & EIP93_MODE_CBC) +#define IS_ECB(mode) ((mode) & EIP93_MODE_ECB) +#define IS_CTR(mode) ((mode) & EIP93_MODE_CTR) +#define IS_RFC3686(mode) ((mode) & EIP93_MODE_RFC3686) + +#define IS_BUSY(flags) ((flags) & EIP93_BUSY) + +#define IS_ENCRYPT(dir) ((dir) & EIP93_ENCRYPT) +#define IS_DECRYPT(dir) ((dir) & EIP93_DECRYPT) + +#define IS_CIPHER(flags) ((flags) & (EIP93_ALG_DES | \ + EIP93_ALG_3DES | \ + EIP93_ALG_AES)) + +#define IS_HASH(flags) ((flags) & (EIP93_HASH_MD5 | \ + EIP93_HASH_SHA1 | \ + EIP93_HASH_SHA224 | \ + EIP93_HASH_SHA256)) + +/** + * struct eip93_device - crypto engine device structure + */ +struct eip93_device { + void __iomem *base; + struct device *dev; + struct clk *clk; + int irq; + struct eip93_ring *ring; +}; + +struct eip93_desc_ring { + void *base; + void *base_end; + dma_addr_t base_dma; + /* write and read pointers */ + void *read; + void *write; + /* descriptor element offset */ + u32 offset; +}; + +struct eip93_state_pool { + void *base; + dma_addr_t base_dma; +}; + +struct eip93_ring { + struct tasklet_struct done_task; + /* command/result rings */ + struct eip93_desc_ring cdr; + struct eip93_desc_ring rdr; + spinlock_t write_lock; + spinlock_t read_lock; + /* aync idr */ + spinlock_t idr_lock; + struct idr crypto_async_idr; +}; + +enum eip93_alg_type { + EIP93_ALG_TYPE_AEAD, + EIP93_ALG_TYPE_SKCIPHER, + EIP93_ALG_TYPE_HASH, +}; + +struct eip93_alg_template { + struct eip93_device *eip93; + enum eip93_alg_type type; + u32 flags; + union { + struct aead_alg aead; + struct skcipher_alg skcipher; + struct ahash_alg ahash; + } alg; +}; + +#endif /* _EIP93_MAIN_H_ */ diff --git a/drivers/crypto/inside-secure/eip93/eip93-regs.h b/drivers/crypto/inside-secure/eip93/eip93-regs.h new file mode 100644 index 000000000000..0490b8d15131 --- /dev/null +++ b/drivers/crypto/inside-secure/eip93/eip93-regs.h @@ -0,0 +1,335 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 - 2021 + * + * Richard van Schagen <vschagen@icloud.com> + * Christian Marangi <ansuelsmth@gmail.com + */ +#ifndef REG_EIP93_H +#define REG_EIP93_H + +#define EIP93_REG_PE_CTRL_STAT 0x0 +#define EIP93_PE_CTRL_PE_PAD_CTRL_STAT GENMASK(31, 24) +#define EIP93_PE_CTRL_PE_EXT_ERR_CODE GENMASK(23, 20) +#define EIP93_PE_CTRL_PE_EXT_ERR_PROCESSING 0x8 +#define EIP93_PE_CTRL_PE_EXT_ERR_BLOCK_SIZE_ERR 0x7 +#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_PK_LENGTH 0x6 +#define EIP93_PE_CTRL_PE_EXT_ERR_ZERO_LENGTH 0x5 +#define EIP93_PE_CTRL_PE_EXT_ERR_SPI 0x4 +#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_ALGO 0x3 +#define EIP93_PE_CTRL_PE_EXT_ERR_INVALID_CRYPTO_OP 0x2 +#define EIP93_PE_CTRL_PE_EXT_ERR_DESC_OWNER 0x1 +#define EIP93_PE_CTRL_PE_EXT_ERR_BUS 0x0 +#define EIP93_PE_CTRL_PE_EXT_ERR BIT(19) +#define EIP93_PE_CTRL_PE_SEQNUM_ERR BIT(18) +#define EIP93_PE_CTRL_PE_PAD_ERR BIT(17) +#define EIP93_PE_CTRL_PE_AUTH_ERR BIT(16) +#define EIP93_PE_CTRL_PE_PAD_VALUE GENMASK(15, 8) +#define EIP93_PE_CTRL_PE_PRNG_MODE GENMASK(7, 6) +#define EIP93_PE_CTRL_PE_HASH_FINAL BIT(4) +#define EIP93_PE_CTRL_PE_INIT_ARC4 BIT(3) +#define EIP93_PE_CTRL_PE_READY_DES_TRING_OWN GENMASK(1, 0) +#define EIP93_PE_CTRL_PE_READY 0x2 +#define EIP93_PE_CTRL_HOST_READY 0x1 +#define EIP93_REG_PE_SOURCE_ADDR 0x4 +#define EIP93_REG_PE_DEST_ADDR 0x8 +#define EIP93_REG_PE_SA_ADDR 0xc +#define EIP93_REG_PE_ADDR 0x10 
/* STATE_ADDR */ +/* + * Special implementation for user ID + * user_id in eip93_descriptor is used to identify the + * descriptor and is opaque and can be used by the driver + * in custom way. + * + * The usage of this should be to put an address to the crypto + * request struct from the kernel but this can't work in 64bit + * world. + * + * Also it's required to put some flags to identify the last + * descriptor. + * + * To handle this, split the u32 in 2 part: + * - 31:16 descriptor flags + * - 15:0 IDR to connect the crypto request address + */ +#define EIP93_REG_PE_USER_ID 0x18 +#define EIP93_PE_USER_ID_DESC_FLAGS GENMASK(31, 16) +#define EIP93_PE_USER_ID_CRYPTO_IDR GENMASK(15, 0) +#define EIP93_REG_PE_LENGTH 0x1c +#define EIP93_PE_LENGTH_BYPASS GENMASK(31, 24) +#define EIP93_PE_LENGTH_HOST_PE_READY GENMASK(23, 22) +#define EIP93_PE_LENGTH_PE_READY 0x2 +#define EIP93_PE_LENGTH_HOST_READY 0x1 +#define EIP93_PE_LENGTH_LENGTH GENMASK(19, 0) + +/* PACKET ENGINE RING configuration registers */ +#define EIP93_REG_PE_CDR_BASE 0x80 +#define EIP93_REG_PE_RDR_BASE 0x84 +#define EIP93_REG_PE_RING_CONFIG 0x88 +#define EIP93_PE_EN_EXT_TRIG BIT(31) +/* Absent in later revision of eip93 */ +/* #define EIP93_PE_RING_OFFSET GENMASK(23, 15) */ +#define EIP93_PE_RING_SIZE GENMASK(9, 0) +#define EIP93_REG_PE_RING_THRESH 0x8c +#define EIPR93_PE_TIMEROUT_EN BIT(31) +#define EIPR93_PE_RD_TIMEOUT GENMASK(29, 26) +#define EIPR93_PE_RDR_THRESH GENMASK(25, 16) +#define EIPR93_PE_CDR_THRESH GENMASK(9, 0) +#define EIP93_REG_PE_CD_COUNT 0x90 +#define EIP93_PE_CD_COUNT GENMASK(10, 0) +/* + * In the same register, writing a value in GENMASK(7, 0) will + * increment the descriptor count and start DMA action. + */ +#define EIP93_PE_CD_COUNT_INCR GENMASK(7, 0) +#define EIP93_REG_PE_RD_COUNT 0x94 +#define EIP93_PE_RD_COUNT GENMASK(10, 0) +/* + * In the same register, writing a value in GENMASK(7, 0) will + * increment the descriptor count and start DMA action. + */ +#define EIP93_PE_RD_COUNT_INCR GENMASK(7, 0) +#define EIP93_REG_PE_RING_RW_PNTR 0x98 /* RING_PNTR */ + +/* PACKET ENGINE configuration registers */ +#define EIP93_REG_PE_CONFIG 0x100 +#define EIP93_PE_CONFIG_SWAP_TARGET BIT(20) +#define EIP93_PE_CONFIG_SWAP_DATA BIT(18) +#define EIP93_PE_CONFIG_SWAP_SA BIT(17) +#define EIP93_PE_CONFIG_SWAP_CDRD BIT(16) +#define EIP93_PE_CONFIG_EN_CDR_UPDATE BIT(10) +#define EIP93_PE_CONFIG_PE_MODE GENMASK(9, 8) +#define EIP93_PE_TARGET_AUTO_RING_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x3) +#define EIP93_PE_TARGET_COMMAND_NO_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x2) +#define EIP93_PE_TARGET_COMMAND_WITH_RDR_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x1) +#define EIP93_PE_DIRECT_HOST_MODE FIELD_PREP(EIP93_PE_CONFIG_PE_MODE, 0x0) +#define EIP93_PE_CONFIG_RST_RING BIT(2) +#define EIP93_PE_CONFIG_RST_PE BIT(0) +#define EIP93_REG_PE_STATUS 0x104 +#define EIP93_REG_PE_BUF_THRESH 0x10c +#define EIP93_PE_OUTBUF_THRESH GENMASK(23, 16) +#define EIP93_PE_INBUF_THRESH GENMASK(7, 0) +#define EIP93_REG_PE_INBUF_COUNT 0x100 +#define EIP93_REG_PE_OUTBUF_COUNT 0x114 +#define EIP93_REG_PE_BUF_RW_PNTR 0x118 /* BUF_PNTR */ + +/* PACKET ENGINE endian config */ +#define EIP93_REG_PE_ENDIAN_CONFIG 0x1cc +#define EIP93_AIROHA_REG_PE_ENDIAN_CONFIG 0x1d0 +#define EIP93_PE_ENDIAN_TARGET_BYTE_SWAP GENMASK(23, 16) +#define EIP93_PE_ENDIAN_MASTER_BYTE_SWAP GENMASK(7, 0) +/* + * Byte goes 2 and 2 and are referenced by ID + * Split GENMASK(7, 0) in 4 part, one for each byte. 
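+ * Each 2-bit field selects which source byte appears in that
+ * position, so the little-endian layout below encodes as 0xe4 and
+ * the big-endian (byte-swapped) layout as 0x1b.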
+ * Example LITTLE ENDIAN:		Example BIG ENDIAN
+ * GENMASK(7, 6) 0x3			GENMASK(7, 6) 0x0
+ * GENMASK(5, 4) 0x2			GENMASK(5, 4) 0x1
+ * GENMASK(3, 2) 0x1			GENMASK(3, 2) 0x2
+ * GENMASK(1, 0) 0x0			GENMASK(1, 0) 0x3
+ */
+#define EIP93_PE_ENDIAN_BYTE0			0x0
+#define EIP93_PE_ENDIAN_BYTE1			0x1
+#define EIP93_PE_ENDIAN_BYTE2			0x2
+#define EIP93_PE_ENDIAN_BYTE3			0x3
+
+/* EIP93 CLOCK control registers */
+#define EIP93_REG_PE_CLOCK_CTRL			0x1e8
+#define EIP93_PE_CLOCK_EN_HASH_CLK		BIT(4)
+#define EIP93_PE_CLOCK_EN_ARC4_CLK		BIT(3)
+#define EIP93_PE_CLOCK_EN_AES_CLK		BIT(2)
+#define EIP93_PE_CLOCK_EN_DES_CLK		BIT(1)
+#define EIP93_PE_CLOCK_EN_PE_CLK		BIT(0)
+
+/* EIP93 Device Option and Revision Register */
+#define EIP93_REG_PE_OPTION_1			0x1f4
+#define EIP93_PE_OPTION_MAC_KEY256		BIT(31)
+#define EIP93_PE_OPTION_MAC_KEY192		BIT(30)
+#define EIP93_PE_OPTION_MAC_KEY128		BIT(29)
+#define EIP93_PE_OPTION_AES_CBC_MAC		BIT(28)
+#define EIP93_PE_OPTION_AES_XCBX		BIT(23)
+#define EIP93_PE_OPTION_SHA_256			BIT(19)
+#define EIP93_PE_OPTION_SHA_224			BIT(18)
+#define EIP93_PE_OPTION_SHA_1			BIT(17)
+#define EIP93_PE_OPTION_MD5			BIT(16)
+#define EIP93_PE_OPTION_AES_KEY256		BIT(15)
+#define EIP93_PE_OPTION_AES_KEY192		BIT(14)
+#define EIP93_PE_OPTION_AES_KEY128		BIT(13)
+#define EIP93_PE_OPTION_AES			BIT(2)
+#define EIP93_PE_OPTION_ARC4			BIT(1)
+#define EIP93_PE_OPTION_TDES			BIT(0)	/* DES and TDES */
+#define EIP93_REG_PE_OPTION_0			0x1f8
+#define EIP93_REG_PE_REVISION			0x1fc
+#define EIP93_PE_REVISION_MAJ_HW_REV		GENMASK(27, 24)
+#define EIP93_PE_REVISION_MIN_HW_REV		GENMASK(23, 20)
+#define EIP93_PE_REVISION_HW_PATCH		GENMASK(19, 16)
+#define EIP93_PE_REVISION_EIP_NO		GENMASK(7, 0)
+
+/* EIP93 Interrupt Control Register */
+#define EIP93_REG_INT_UNMASK_STAT		0x200
+#define EIP93_REG_INT_MASK_STAT			0x204
+#define EIP93_REG_INT_CLR			0x204
+#define EIP93_REG_INT_MASK			0x208	/* INT_EN */
+/* Each int reg has the same bitmap */
+#define EIP93_INT_INTERFACE_ERR			BIT(18)
+#define EIP93_INT_RPOC_ERR			BIT(17)
+#define EIP93_INT_PE_RING_ERR			BIT(16)
+#define EIP93_INT_HALT				BIT(15)
+#define EIP93_INT_OUTBUF_THRESH			BIT(11)
+#define EIP93_INT_INBUF_THRESH			BIT(10)
+#define EIP93_INT_OPERATION_DONE		BIT(9)
+#define EIP93_INT_RDR_THRESH			BIT(1)
+#define EIP93_INT_CDR_THRESH			BIT(0)
+#define EIP93_INT_ALL				(EIP93_INT_INTERFACE_ERR | \
+						 EIP93_INT_RPOC_ERR | \
+						 EIP93_INT_PE_RING_ERR | \
+						 EIP93_INT_HALT | \
+						 EIP93_INT_OUTBUF_THRESH | \
+						 EIP93_INT_INBUF_THRESH | \
+						 EIP93_INT_OPERATION_DONE | \
+						 EIP93_INT_RDR_THRESH | \
+						 EIP93_INT_CDR_THRESH)
+
+#define EIP93_REG_INT_CFG			0x20c
+#define EIP93_INT_TYPE_PULSE			BIT(0)
+#define EIP93_REG_MASK_ENABLE			0x210
+#define EIP93_REG_MASK_DISABLE			0x214
+
+/* EIP93 SA Record register */
+#define EIP93_REG_SA_CMD_0			0x400
+#define EIP93_SA_CMD_SAVE_HASH			BIT(29)
+#define EIP93_SA_CMD_SAVE_IV			BIT(28)
+#define EIP93_SA_CMD_HASH_SOURCE		GENMASK(27, 26)
+#define EIP93_SA_CMD_HASH_NO_LOAD		FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x3)
+#define EIP93_SA_CMD_HASH_FROM_STATE		FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x2)
+#define EIP93_SA_CMD_HASH_FROM_SA		FIELD_PREP(EIP93_SA_CMD_HASH_SOURCE, 0x0)
+#define EIP93_SA_CMD_IV_SOURCE			GENMASK(25, 24)
+#define EIP93_SA_CMD_IV_FROM_PRNG		FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x3)
+#define EIP93_SA_CMD_IV_FROM_STATE		FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x2)
+#define EIP93_SA_CMD_IV_FROM_INPUT		FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x1)
+#define EIP93_SA_CMD_IV_NO_LOAD			FIELD_PREP(EIP93_SA_CMD_IV_SOURCE, 0x0)
+#define EIP93_SA_CMD_DIGEST_LENGTH		GENMASK(23, 20)
+#define EIP93_SA_CMD_DIGEST_10WORD		FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 
+#define EIP93_SA_CMD_DIGEST_8WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x8) /* SHA-256 */
+#define EIP93_SA_CMD_DIGEST_7WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x7) /* SHA-224 */
+#define EIP93_SA_CMD_DIGEST_6WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x6)
+#define EIP93_SA_CMD_DIGEST_5WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x5) /* SHA1 */
+#define EIP93_SA_CMD_DIGEST_4WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x4) /* MD5 and AES-based */
+#define EIP93_SA_CMD_DIGEST_3WORD_IPSEC FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x3) /* IPSEC */
+#define EIP93_SA_CMD_DIGEST_2WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x2)
+#define EIP93_SA_CMD_DIGEST_1WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x1)
+#define EIP93_SA_CMD_DIGEST_3WORD FIELD_PREP(EIP93_SA_CMD_DIGEST_LENGTH, 0x0) /* 96bit output */
+#define EIP93_SA_CMD_HDR_PROC BIT(19)
+#define EIP93_SA_CMD_EXT_PAD BIT(18)
+#define EIP93_SA_CMD_SCPAD BIT(17)
+#define EIP93_SA_CMD_HASH GENMASK(15, 12)
+#define EIP93_SA_CMD_HASH_NULL FIELD_PREP(EIP93_SA_CMD_HASH, 0xf)
+#define EIP93_SA_CMD_HASH_SHA256 FIELD_PREP(EIP93_SA_CMD_HASH, 0x3)
+#define EIP93_SA_CMD_HASH_SHA224 FIELD_PREP(EIP93_SA_CMD_HASH, 0x2)
+#define EIP93_SA_CMD_HASH_SHA1 FIELD_PREP(EIP93_SA_CMD_HASH, 0x1)
+#define EIP93_SA_CMD_HASH_MD5 FIELD_PREP(EIP93_SA_CMD_HASH, 0x0)
+#define EIP93_SA_CMD_CIPHER GENMASK(11, 8)
+#define EIP93_SA_CMD_CIPHER_NULL FIELD_PREP(EIP93_SA_CMD_CIPHER, 0xf)
+#define EIP93_SA_CMD_CIPHER_AES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x3)
+#define EIP93_SA_CMD_CIPHER_ARC4 FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x2)
+#define EIP93_SA_CMD_CIPHER_3DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x1)
+#define EIP93_SA_CMD_CIPHER_DES FIELD_PREP(EIP93_SA_CMD_CIPHER, 0x0)
+#define EIP93_SA_CMD_PAD_TYPE GENMASK(7, 6)
+#define EIP93_SA_CMD_PAD_CONST_SSL FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x6)
+#define EIP93_SA_CMD_PAD_TLS_DTLS FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x5)
+#define EIP93_SA_CMD_PAD_ZERO FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x3)
+#define EIP93_SA_CMD_PAD_CONST FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x2)
+#define EIP93_SA_CMD_PAD_PKCS7 FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x1)
+#define EIP93_SA_CMD_PAD_IPSEC FIELD_PREP(EIP93_SA_CMD_PAD_TYPE, 0x0)
+#define EIP93_SA_CMD_OPGROUP GENMASK(5, 4)
+#define EIP93_SA_CMD_OP_EXT FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x2)
+#define EIP93_SA_CMD_OP_PROTOCOL FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x1)
+#define EIP93_SA_CMD_OP_BASIC FIELD_PREP(EIP93_SA_CMD_OPGROUP, 0x0)
+#define EIP93_SA_CMD_DIRECTION_IN BIT(3) /* 0: outbound 1: inbound */
+#define EIP93_SA_CMD_OPCODE GENMASK(2, 0)
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_PRNG 0x7
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_HASH 0x3
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC_HASH 0x1
+#define EIP93_SA_CMD_OPCODE_BASIC_OUT_ENC 0x0
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH 0x3
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_HASH_DEC 0x1
+#define EIP93_SA_CMD_OPCODE_BASIC_IN_DEC 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_ESP 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_TLS 0x5
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_OUT_SRTP 0x7
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_ESP 0x0
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SSL 0x2
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_TLS 0x3
+#define EIP93_SA_CMD_OPCODE_PROTOCOL_IN_SRTP 0x7
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_DTSL 0x1
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_SSL 0x4
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV10 0x5
+#define EIP93_SA_CMD_OPCODE_EXT_OUT_TLSV11 0x6
+#define EIP93_SA_CMD_OPCODE_EXT_IN_DTSL 0x1
+#define
EIP93_SA_CMD_OPCODE_EXT_IN_SSL 0x4 +#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV10 0x5 +#define EIP93_SA_CMD_OPCODE_EXT_IN_TLSV11 0x6 +#define EIP93_REG_SA_CMD_1 0x404 +#define EIP93_SA_CMD_EN_SEQNUM_CHK BIT(29) +/* This mask can be either used for ARC4 or AES */ +#define EIP93_SA_CMD_ARC4_KEY_LENGHT GENMASK(28, 24) +#define EIP93_SA_CMD_AES_DEC_KEY BIT(28) /* 0: encrypt key 1: decrypt key */ +#define EIP93_SA_CMD_AES_KEY_LENGTH GENMASK(26, 24) +#define EIP93_SA_CMD_AES_KEY_256BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x4) +#define EIP93_SA_CMD_AES_KEY_192BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x3) +#define EIP93_SA_CMD_AES_KEY_128BIT FIELD_PREP(EIP93_SA_CMD_AES_KEY_LENGTH, 0x2) +#define EIP93_SA_CMD_HASH_CRYPT_OFFSET GENMASK(23, 16) +#define EIP93_SA_CMD_BYTE_OFFSET BIT(13) /* 0: CRYPT_OFFSET in 32bit word 1: CRYPT_OFFSET in 8bit bytes */ +#define EIP93_SA_CMD_HMAC BIT(12) +#define EIP93_SA_CMD_SSL_MAC BIT(12) +/* This mask can be either used for ARC4 or AES */ +#define EIP93_SA_CMD_CHIPER_MODE GENMASK(9, 8) +/* AES or DES operations */ +#define EIP93_SA_CMD_CHIPER_MODE_ICM FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x3) +#define EIP93_SA_CMD_CHIPER_MODE_CTR FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x2) +#define EIP93_SA_CMD_CHIPER_MODE_CBC FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1) +#define EIP93_SA_CMD_CHIPER_MODE_ECB FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0) +/* ARC4 operations */ +#define EIP93_SA_CMD_CHIPER_MODE_STATEFULL FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x1) +#define EIP93_SA_CMD_CHIPER_MODE_STATELESS FIELD_PREP(EIP93_SA_CMD_CHIPER_MODE, 0x0) +#define EIP93_SA_CMD_COPY_PAD BIT(3) +#define EIP93_SA_CMD_COPY_PAYLOAD BIT(2) +#define EIP93_SA_CMD_COPY_HEADER BIT(1) +#define EIP93_SA_CMD_COPY_DIGEST BIT(0) /* With this enabled, COPY_PAD is required */ + +/* State save register */ +#define EIP93_REG_STATE_IV_0 0x500 +#define EIP93_REG_STATE_IV_1 0x504 + +#define EIP93_REG_PE_ARC4STATE 0x700 + +struct sa_record { + u32 sa_cmd0_word; + u32 sa_cmd1_word; + u32 sa_key[8]; + u8 sa_i_digest[32]; + u8 sa_o_digest[32]; + u32 sa_spi; + u32 sa_seqnum[2]; + u32 sa_seqmum_mask[2]; + u32 sa_nonce; +} __packed; + +struct sa_state { + u32 state_iv[4]; + u32 state_byte_cnt[2]; + u8 state_i_digest[32]; +} __packed; + +struct eip93_descriptor { + u32 pe_ctrl_stat_word; + u32 src_addr; + u32 dst_addr; + u32 sa_addr; + u32 state_addr; + u32 arc4_addr; + u32 user_id; + u32 pe_length_word; +} __packed; + +#endif diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index c3776b0de51d..09d9589f2d68 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -33,8 +33,6 @@ static unsigned int nr_cpus_per_node; /* Number of physical cpus sharing each iaa instance */ static unsigned int cpus_per_iaa; -static struct crypto_comp *deflate_generic_tfm; - /* Per-cpu lookup table for balanced wqs */ static struct wq_table_entry __percpu *wq_table; @@ -1001,17 +999,14 @@ out: static int deflate_generic_decompress(struct acomp_req *req) { - void *src, *dst; + ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); int ret; - src = kmap_local_page(sg_page(req->src)) + req->src->offset; - dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset; - - ret = crypto_comp_decompress(deflate_generic_tfm, - src, req->slen, dst, &req->dlen); - - kunmap_local(src); - kunmap_local(dst); + acomp_request_set_callback(fbreq, 0, NULL, NULL); + acomp_request_set_params(fbreq, req->src, req->dst, req->slen, + req->dlen); + ret = 
crypto_acomp_decompress(fbreq); + req->dlen = fbreq->dlen; update_total_sw_decomp_calls(); @@ -1136,8 +1131,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, - u32 *compression_crc, - bool disable_async) + u32 *compression_crc) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1180,7 +1174,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src2_size = sizeof(struct aecs_comp_table_record); desc->completion_addr = idxd_desc->compl_dma; - if (ctx->use_irq && !disable_async) { + if (ctx->use_irq) { desc->flags |= IDXD_OP_FLAG_RCI; idxd_desc->crypto.req = req; @@ -1193,7 +1187,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) + } else if (ctx->async_mode) req->base.data = idxd_desc; dev_dbg(dev, "%s: compression mode %s," @@ -1214,7 +1208,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, update_total_comp_calls(); update_wq_comp_calls(wq); - if (ctx->async_mode && !disable_async) { + if (ctx->async_mode) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); goto out; @@ -1234,7 +1228,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - if (!ctx->async_mode || disable_async) + if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); out: return ret; @@ -1500,13 +1494,11 @@ static int iaa_comp_acompress(struct acomp_req *req) struct iaa_compression_ctx *compression_ctx; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; - bool disable_async = false; int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; u32 compression_crc; struct idxd_wq *wq; struct device *dev; - int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1536,21 +1528,6 @@ static int iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq); - if (!req->dst) { - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; - - /* incompressible data will always be < 2 * slen */ - req->dlen = 2 * req->slen; - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - disable_async = true; - } - dev = &wq->idxd->pdev->dev; nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); @@ -1580,7 +1557,7 @@ static int iaa_comp_acompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, - &req->dlen, &compression_crc, disable_async); + &req->dlen, &compression_crc); if (ret == -EINPROGRESS) return ret; @@ -1611,100 +1588,6 @@ err_map_dst: out: iaa_wq_put(wq); - if (order >= 0) - sgl_free_order(req->dst, order); - - return ret; -} - -static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) -{ - gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
- GFP_KERNEL : GFP_ATOMIC; - struct crypto_tfm *tfm = req->base.tfm; - dma_addr_t src_addr, dst_addr; - int nr_sgs, cpu, ret = 0; - struct iaa_wq *iaa_wq; - struct device *dev; - struct idxd_wq *wq; - int order = -1; - - cpu = get_cpu(); - wq = wq_table_next_wq(cpu); - put_cpu(); - if (!wq) { - pr_debug("no wq configured for cpu=%d\n", cpu); - return -ENODEV; - } - - ret = iaa_wq_get(wq); - if (ret) { - pr_debug("no wq available for cpu=%d\n", cpu); - return -ENODEV; - } - - iaa_wq = idxd_wq_get_private(wq); - - dev = &wq->idxd->pdev->dev; - - nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map src sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto out; - } - src_addr = sg_dma_address(req->src); - dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," - " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, - req->src, req->slen, sg_dma_len(req->src)); - - req->dlen = 4 * req->slen; /* start with ~avg comp rato */ -alloc_dest: - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - - nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map dst sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto err_map_dst; - } - - dst_addr = sg_dma_address(req->dst); - dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," - " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, - req->dst, req->dlen, sg_dma_len(req->dst)); - ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, true); - if (ret == -EOVERFLOW) { - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - req->dlen *= 2; - if (req->dlen > CRYPTO_ACOMP_DST_MAX) - goto err_map_dst; - goto alloc_dest; - } - - if (ret != 0) - dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); - - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); -err_map_dst: - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); -out: - iaa_wq_put(wq); - - if (order >= 0) - sgl_free_order(req->dst, order); - return ret; } @@ -1727,9 +1610,6 @@ static int iaa_comp_adecompress(struct acomp_req *req) return -EINVAL; } - if (!req->dst) - return iaa_comp_adecompress_alloc_dest(req); - cpu = get_cpu(); wq = wq_table_next_wq(cpu); put_cpu(); @@ -1810,19 +1690,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) return 0; } -static void dst_free(struct scatterlist *sgl) -{ - /* - * Called for req->dst = NULL cases but we free elsewhere - * using sgl_free_order(). 
- */ -} - static struct acomp_alg iaa_acomp_fixed_deflate = { .init = iaa_comp_init_fixed, .compress = iaa_comp_acompress, .decompress = iaa_comp_adecompress, - .dst_free = dst_free, .base = { .cra_name = "deflate", .cra_driver_name = "deflate-iaa", @@ -2022,15 +1893,6 @@ static int __init iaa_crypto_init_module(void) } nr_cpus_per_node = nr_cpus / nr_nodes; - if (crypto_has_comp("deflate-generic", 0, 0)) - deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0); - - if (IS_ERR_OR_NULL(deflate_generic_tfm)) { - pr_err("IAA could not alloc %s tfm: errcode = %ld\n", - "deflate-generic", PTR_ERR(deflate_generic_tfm)); - return -ENOMEM; - } - ret = iaa_aecs_init_fixed(); if (ret < 0) { pr_debug("IAA fixed compression mode init failed\n"); @@ -2072,7 +1934,6 @@ err_verify_attr_create: err_driver_reg: iaa_aecs_cleanup_fixed(); err_aecs_init: - crypto_free_comp(deflate_generic_tfm); goto out; } @@ -2089,7 +1950,6 @@ static void __exit iaa_crypto_cleanup_module(void) &driver_attr_verify_compress); idxd_driver_unregister(&iaa_crypto_driver); iaa_aecs_cleanup_fixed(); - crypto_free_comp(deflate_generic_tfm); pr_debug("cleaned up\n"); } diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile index 45728659fbc4..72b24b1804cf 100644 --- a/drivers/crypto/intel/qat/qat_420xx/Makefile +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o -qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o +qat_420xx-y := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c index 9faef33e54bd..4feeef83f7a3 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -98,7 +98,7 @@ static struct adf_hw_device_class adf_420xx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses; + u32 me_disable = self->fuses[ADF_FUSECTL4]; return ~me_disable & ADF_420XX_ACCELENGINES_MASK; } @@ -106,8 +106,7 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return ARRAY_SIZE(adf_fw_cy_config); case SVC_DC: return ARRAY_SIZE(adf_fw_dc_config); @@ -118,10 +117,8 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) case SVC_ASYM: return ARRAY_SIZE(adf_fw_asym_config); case SVC_ASYM_DC: - case SVC_DC_ASYM: return ARRAY_SIZE(adf_fw_asym_dc_config); case SVC_SYM_DC: - case SVC_DC_SYM: return ARRAY_SIZE(adf_fw_sym_dc_config); default: return 0; @@ -131,8 +128,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return adf_fw_cy_config; case SVC_DC: return adf_fw_dc_config; @@ -143,10 +139,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev case SVC_ASYM: return adf_fw_asym_config; case SVC_ASYM_DC: - case SVC_DC_ASYM: return adf_fw_asym_dc_config; case SVC_SYM_DC: - case SVC_DC_SYM: return adf_fw_sym_dc_config; default: return NULL; @@ -266,8 +260,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } switch 
(adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return capabilities_sym | capabilities_asym; case SVC_DC: return capabilities_dc; @@ -284,10 +277,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) case SVC_ASYM: return capabilities_asym; case SVC_ASYM_DC: - case SVC_DC_ASYM: return capabilities_asym | capabilities_dc; case SVC_SYM_DC: - case SVC_DC_SYM: return capabilities_sym | capabilities_dc; default: return 0; @@ -420,6 +411,7 @@ static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->parerr_wat_wcp_mask = ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK; dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; } @@ -482,6 +474,7 @@ void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_420XX_AE_FREQ; + hw_data->services_supported = adf_gen4_services_supported; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c index 9589d60fb281..8084aa0f7f41 100644 --- a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -79,7 +79,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_4xxx/Makefile b/drivers/crypto/intel/qat/qat_4xxx/Makefile index 9ba202079a22..e8480bb80dee 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_4xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx.o -qat_4xxx-objs := adf_drv.o adf_4xxx_hw_data.o +qat_4xxx-y := adf_drv.o adf_4xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index bbd92c017c28..4eb6ef99efdd 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -101,7 +101,7 @@ static struct adf_hw_device_class adf_4xxx_class = { static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 me_disable = self->fuses; + u32 me_disable = self->fuses[ADF_FUSECTL4]; return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; } @@ -178,8 +178,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return capabilities_sym | capabilities_asym; case SVC_DC: return capabilities_dc; @@ -196,10 +195,8 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) case SVC_ASYM: return capabilities_asym; case SVC_ASYM_DC: - case SVC_DC_ASYM: return capabilities_asym | capabilities_dc; 
case SVC_SYM_DC: - case SVC_DC_SYM: return capabilities_sym | capabilities_dc; default: return 0; @@ -241,8 +238,7 @@ static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { switch (adf_get_service_enabled(accel_dev)) { - case SVC_CY: - case SVC_CY2: + case SVC_SYM_ASYM: return adf_fw_cy_config; case SVC_DC: return adf_fw_dc_config; @@ -253,10 +249,8 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev case SVC_ASYM: return adf_fw_asym_config; case SVC_ASYM_DC: - case SVC_DC_ASYM: return adf_fw_asym_dc_config; case SVC_SYM_DC: - case SVC_DC_SYM: return adf_fw_sym_dc_config; default: return NULL; @@ -466,6 +460,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; hw_data->clock_frequency = ADF_4XXX_AE_FREQ; + hw_data->services_supported = adf_gen4_services_supported; adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index d7de1cad1335..5537a9991e4e 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -81,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses[ADF_FUSECTL4]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/Makefile b/drivers/crypto/intel/qat/qat_c3xxx/Makefile index 7a06ad519bfc..d9e568572da8 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxx/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx.o -qat_c3xxx-objs := adf_drv.o adf_c3xxx_hw_data.o +qat_c3xxx-y := adf_drv.o adf_c3xxx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 201f9412c582..e78f7bfd30b8 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -27,8 +27,8 @@ static struct adf_hw_device_class c3xxx_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; u32 accel; accel = ~(fuses | straps) >> ADF_C3XXX_ACCELERATORS_REG_OFFSET; @@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; unsigned long disabled; u32 ae_disable; int accel; diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index caa53882fda6..b825b35ab4bf 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 
adf_init_hw_data_c3xxx(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); pci_read_config_dword(pdev, ADF_C3XXX_SOFTSTRAP_CSR_OFFSET, &hw_data->straps); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile index 7ef633058c4f..31a908a211ac 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf.o -qat_c3xxxvf-objs := adf_drv.o adf_c3xxxvf_hw_data.o +qat_c3xxxvf-y := adf_drv.o adf_c3xxxvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62x/Makefile b/drivers/crypto/intel/qat/qat_c62x/Makefile index cc9255b3b198..cbdaaa135e84 100644 --- a/drivers/crypto/intel/qat/qat_c62x/Makefile +++ b/drivers/crypto/intel/qat/qat_c62x/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x.o -qat_c62x-objs := adf_drv.o adf_c62x_hw_data.o +qat_c62x-y := adf_drv.o adf_c62x_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 6b5b0cf9c7c7..32ebe09477a8 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -27,8 +27,8 @@ static struct adf_hw_device_class c62x_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; u32 accel; accel = ~(fuses | straps) >> ADF_C62X_ACCELERATORS_REG_OFFSET; @@ -39,8 +39,8 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { + u32 fuses = self->fuses[ADF_FUSECTL0]; u32 straps = self->straps; - u32 fuses = self->fuses; unsigned long disabled; u32 ae_disable; int accel; diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index b7398fee19ed..8a7bdec358d6 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_c62x(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); pci_read_config_dword(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, &hw_data->straps); @@ -169,7 +169,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); /* Find and map all the device's BARS */ - i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0; + i = (hw_data->fuses[ADF_FUSECTL0] & ADF_DEVICE_FUSECTL_MASK) ? 
1 : 0; bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) { struct adf_bar *bar = &accel_pci_dev->pci_bars[i++]; diff --git a/drivers/crypto/intel/qat/qat_c62xvf/Makefile b/drivers/crypto/intel/qat/qat_c62xvf/Makefile index 256786662d60..60e499b041ec 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/Makefile +++ b/drivers/crypto/intel/qat/qat_c62xvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf.o -qat_c62xvf-objs := adf_drv.o adf_c62xvf_hw_data.o +qat_c62xvf-y := adf_drv.o adf_c62xvf_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 7acf9c576149..af5df29fd2e3 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,62 +1,62 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE='"CRYPTO_QAT"' -intel_qat-objs := adf_cfg.o \ - adf_isr.o \ - adf_ctl_drv.o \ +intel_qat-y := adf_accel_engine.o \ + adf_admin.o \ + adf_aer.o \ + adf_cfg.o \ adf_cfg_services.o \ + adf_clock.o \ + adf_ctl_drv.o \ adf_dev_mgr.o \ - adf_init.o \ - adf_accel_engine.o \ - adf_aer.o \ - adf_transport.o \ - adf_admin.o \ - adf_hw_arbiter.o \ - adf_sysfs.o \ - adf_sysfs_ras_counters.o \ + adf_gen2_config.o \ + adf_gen2_dc.o \ adf_gen2_hw_csr_data.o \ adf_gen2_hw_data.o \ - adf_gen2_config.o \ adf_gen4_config.o \ + adf_gen4_dc.o \ adf_gen4_hw_csr_data.o \ adf_gen4_hw_data.o \ - adf_gen4_vf_mig.o \ adf_gen4_pm.o \ - adf_gen2_dc.o \ - adf_gen4_dc.o \ adf_gen4_ras.o \ adf_gen4_timer.o \ - adf_clock.o \ + adf_gen4_vf_mig.o \ + adf_hw_arbiter.o \ + adf_init.o \ + adf_isr.o \ adf_mstate_mgr.o \ - qat_crypto.o \ - qat_compression.o \ - qat_comp_algs.o \ - qat_algs.o \ - qat_asym_algs.o \ - qat_algs_send.o \ - adf_rl.o \ adf_rl_admin.o \ + adf_rl.o \ + adf_sysfs.o \ + adf_sysfs_ras_counters.o \ adf_sysfs_rl.o \ - qat_uclo.o \ - qat_hal.o \ + adf_transport.o \ + qat_algs.o \ + qat_algs_send.o \ + qat_asym_algs.o \ qat_bl.o \ - qat_mig_dev.o + qat_comp_algs.o \ + qat_compression.o \ + qat_crypto.o \ + qat_hal.o \ + qat_mig_dev.o \ + qat_uclo.o -intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ +intel_qat-$(CONFIG_DEBUG_FS) += adf_cnv_dbgfs.o \ + adf_dbgfs.o \ adf_fw_counters.o \ - adf_cnv_dbgfs.o \ adf_gen4_pm_debugfs.o \ adf_gen4_tl.o \ - adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ + adf_heartbeat.o \ adf_pm_dbgfs.o \ adf_telemetry.o \ adf_tl_debugfs.o \ - adf_dbgfs.o + adf_transport_debug.o -intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ +intel_qat-$(CONFIG_PCI_IOV) += adf_gen2_pfvf.o adf_gen4_pfvf.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ - adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ - adf_gen2_pfvf.o adf_gen4_pfvf.o + adf_pfvf_utils.o adf_pfvf_vf_msg.o \ + adf_pfvf_vf_proto.o adf_sriov.o adf_vf_isr.o intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 7830ecb1a1f1..dc21551153cb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -10,6 +10,7 @@ #include <linux/ratelimit.h> #include <linux/types.h> #include <linux/qat/qat_mig_dev.h> +#include <linux/wordpart.h> #include "adf_cfg_common.h" #include "adf_rl.h" 
#include "adf_telemetry.h" @@ -52,6 +53,16 @@ enum adf_accel_capabilities { ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128 }; +enum adf_fuses { + ADF_FUSECTL0, + ADF_FUSECTL1, + ADF_FUSECTL2, + ADF_FUSECTL3, + ADF_FUSECTL4, + ADF_FUSECTL5, + ADF_MAX_FUSES +}; + struct adf_bar { resource_size_t base_addr; void __iomem *virt_addr; @@ -333,6 +344,7 @@ struct adf_hw_device_data { int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); + bool (*services_supported)(unsigned long mask); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; @@ -343,7 +355,7 @@ struct adf_hw_device_data { struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; - u32 fuses; + u32 fuses[ADF_MAX_FUSES]; u32 straps; u32 accel_capabilities_mask; u32 extended_dc_capabilities; @@ -370,6 +382,15 @@ struct adf_hw_device_data { /* CSR write macro */ #define ADF_CSR_WR(csr_base, csr_offset, val) \ __raw_writel(val, csr_base + csr_offset) +/* + * CSR write macro to handle cases where the high and low + * offsets are sparsely located. + */ +#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \ +do { \ + ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \ + ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \ +} while (0) /* CSR read macro */ #define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset) diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c index 268052294468..30abcd9e1283 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2023 Intel Corporation */ +#include <linux/array_size.h> +#include <linux/bitops.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/string.h> @@ -8,40 +10,165 @@ #include "adf_cfg_services.h" #include "adf_cfg_strings.h" -const char *const adf_cfg_services[] = { - [SVC_CY] = ADF_CFG_CY, - [SVC_CY2] = ADF_CFG_ASYM_SYM, +static const char *const adf_cfg_services[] = { + [SVC_ASYM] = ADF_CFG_ASYM, + [SVC_SYM] = ADF_CFG_SYM, [SVC_DC] = ADF_CFG_DC, [SVC_DCC] = ADF_CFG_DCC, - [SVC_SYM] = ADF_CFG_SYM, - [SVC_ASYM] = ADF_CFG_ASYM, - [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, - [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, - [SVC_DC_SYM] = ADF_CFG_DC_SYM, - [SVC_SYM_DC] = ADF_CFG_SYM_DC, }; -EXPORT_SYMBOL_GPL(adf_cfg_services); -int adf_get_service_enabled(struct adf_accel_dev *accel_dev) +/* + * Ensure that the size of the array matches the number of services, + * SVC_BASE_COUNT, that is used to size the bitmap. + */ +static_assert(ARRAY_SIZE(adf_cfg_services) == SVC_BASE_COUNT); + +/* + * Ensure that the maximum number of concurrent services that can be + * enabled on a device is less than or equal to the number of total + * supported services. + */ +static_assert(ARRAY_SIZE(adf_cfg_services) >= MAX_NUM_CONCURR_SVC); + +/* + * Ensure that the number of services fit a single unsigned long, as each + * service is represented by a bit in the mask. + */ +static_assert(BITS_PER_LONG >= SVC_BASE_COUNT); + +/* + * Ensure that size of the concatenation of all service strings is smaller + * than the size of the buffer that will contain them. 
+ */ +static_assert(sizeof(ADF_CFG_SYM ADF_SERVICES_DELIMITER + ADF_CFG_ASYM ADF_SERVICES_DELIMITER + ADF_CFG_DC ADF_SERVICES_DELIMITER + ADF_CFG_DCC) < ADF_CFG_MAX_VAL_LEN_IN_BYTES); + +static int adf_service_string_to_mask(struct adf_accel_dev *accel_dev, const char *buf, + size_t len, unsigned long *out_mask) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + unsigned long mask = 0; + char *substr, *token; + int id, num_svc = 0; + + if (len > ADF_CFG_MAX_VAL_LEN_IN_BYTES - 1) + return -EINVAL; + + strscpy(services, buf, ADF_CFG_MAX_VAL_LEN_IN_BYTES); + substr = services; + + while ((token = strsep(&substr, ADF_SERVICES_DELIMITER))) { + id = sysfs_match_string(adf_cfg_services, token); + if (id < 0) + return id; + + if (test_and_set_bit(id, &mask)) + return -EINVAL; + + if (num_svc++ == MAX_NUM_CONCURR_SVC) + return -EINVAL; + } + + if (hw_data->services_supported && !hw_data->services_supported(mask)) + return -EINVAL; + + *out_mask = mask; + + return 0; +} + +static int adf_service_mask_to_string(unsigned long mask, char *buf, size_t len) +{ + int offset = 0; + int bit; + + if (len < ADF_CFG_MAX_VAL_LEN_IN_BYTES) + return -ENOSPC; + + for_each_set_bit(bit, &mask, SVC_BASE_COUNT) { + if (offset) + offset += scnprintf(buf + offset, len - offset, + ADF_SERVICES_DELIMITER); + + offset += scnprintf(buf + offset, len - offset, "%s", + adf_cfg_services[bit]); + } + + return 0; +} + +int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, + size_t in_len, char *out, size_t out_len) { - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + unsigned long mask; + int ret; + + ret = adf_service_string_to_mask(accel_dev, in, in_len, &mask); + if (ret) + return ret; + + if (!mask) + return -EINVAL; + + return adf_service_mask_to_string(mask, out, out_len); +} + +static int adf_get_service_mask(struct adf_accel_dev *accel_dev, unsigned long *mask) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; + size_t len; int ret; ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, ADF_SERVICES_ENABLED, services); if (ret) { - dev_err(&GET_DEV(accel_dev), - ADF_SERVICES_ENABLED " param not found\n"); + dev_err(&GET_DEV(accel_dev), "%s param not found\n", + ADF_SERVICES_ENABLED); return ret; } - ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services), - services); - if (ret < 0) - dev_err(&GET_DEV(accel_dev), - "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n", - services); + len = strnlen(services, ADF_CFG_MAX_VAL_LEN_IN_BYTES); + ret = adf_service_string_to_mask(accel_dev, services, len, mask); + if (ret) + dev_err(&GET_DEV(accel_dev), "Invalid value of %s param: %s\n", + ADF_SERVICES_ENABLED, services); return ret; } + +int adf_get_service_enabled(struct adf_accel_dev *accel_dev) +{ + unsigned long mask; + int ret; + + ret = adf_get_service_mask(accel_dev, &mask); + if (ret) + return ret; + + if (test_bit(SVC_SYM, &mask) && test_bit(SVC_ASYM, &mask)) + return SVC_SYM_ASYM; + + if (test_bit(SVC_SYM, &mask) && test_bit(SVC_DC, &mask)) + return SVC_SYM_DC; + + if (test_bit(SVC_ASYM, &mask) && test_bit(SVC_DC, &mask)) + return SVC_ASYM_DC; + + if (test_bit(SVC_SYM, &mask)) + return SVC_SYM; + + if (test_bit(SVC_ASYM, &mask)) + return SVC_ASYM; + + if (test_bit(SVC_DC, &mask)) + return SVC_DC; + + if (test_bit(SVC_DCC, &mask)) + return SVC_DCC; + + return -EINVAL; +} EXPORT_SYMBOL_GPL(adf_get_service_enabled); diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h 
b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h index c6b0328b0f5b..f6bafc15cbc6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h @@ -8,21 +8,29 @@ struct adf_accel_dev; enum adf_services { - SVC_CY = 0, - SVC_CY2, + SVC_ASYM = 0, + SVC_SYM, SVC_DC, SVC_DCC, - SVC_SYM, - SVC_ASYM, - SVC_DC_ASYM, - SVC_ASYM_DC, - SVC_DC_SYM, + SVC_BASE_COUNT +}; + +enum adf_composed_services { + SVC_SYM_ASYM = SVC_BASE_COUNT, SVC_SYM_DC, - SVC_COUNT + SVC_ASYM_DC, +}; + +enum { + ADF_ONE_SERVICE = 1, + ADF_TWO_SERVICES, + ADF_THREE_SERVICES, }; -extern const char *const adf_cfg_services[SVC_COUNT]; +#define MAX_NUM_CONCURR_SVC ADF_THREE_SERVICES +int adf_parse_service_string(struct adf_accel_dev *accel_dev, const char *in, + size_t in_len, char *out, size_t out_len); int adf_get_service_enabled(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index e015ad6cace2..b79982c4a856 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -27,13 +27,9 @@ #define ADF_CFG_CY "sym;asym" #define ADF_CFG_SYM "sym" #define ADF_CFG_ASYM "asym" -#define ADF_CFG_ASYM_SYM "asym;sym" -#define ADF_CFG_ASYM_DC "asym;dc" -#define ADF_CFG_DC_ASYM "dc;asym" -#define ADF_CFG_SYM_DC "sym;dc" -#define ADF_CFG_DC_SYM "dc;sym" #define ADF_CFG_DCC "dcc" #define ADF_SERVICES_ENABLED "ServicesEnabled" +#define ADF_SERVICES_DELIMITER ";" #define ADF_PM_IDLE_SUPPORT "PmIdleSupport" #define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled" #define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c index 1f64bf49b221..2b263442c856 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c @@ -115,8 +115,8 @@ u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; + u32 fuses = hw_data->fuses[ADF_FUSECTL0]; u32 straps = hw_data->straps; - u32 fuses = hw_data->fuses; u32 legfuses; u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c index fe1f3d727dc5..f97e7a880f3a 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -213,7 +213,6 @@ static int adf_no_dev_config(struct adf_accel_dev *accel_dev) */ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) { - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; int ret; ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); @@ -224,18 +223,8 @@ int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err; - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) - goto err; - - ret = sysfs_match_string(adf_cfg_services, services); - if (ret < 0) - goto err; - - switch (ret) { - case SVC_CY: - case SVC_CY2: + switch (adf_get_service_enabled(accel_dev)) { + case SVC_SYM_ASYM: ret = adf_crypto_dev_config(accel_dev); break; case SVC_DC: diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c 
b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 41a0979e68c1..099949a2421c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2020 Intel Corporation */ +#include <linux/bitops.h> #include <linux/iopoll.h> #include <asm/div64.h> #include "adf_accel_devices.h" @@ -134,36 +135,18 @@ int adf_gen4_init_device(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen4_init_device); -static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, - u32 *lower) -{ - *lower = lower_32_bits(value); - *upper = upper_32_bits(value); -} - void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) { void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE; u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE; - u32 ssm_wdt_pke_high = 0; - u32 ssm_wdt_pke_low = 0; - u32 ssm_wdt_high = 0; - u32 ssm_wdt_low = 0; - /* Convert 64bit WDT timer value into 32bit values for - * mmio write to 32bit CSRs. - */ - adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low); - adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high, - &ssm_wdt_pke_low); - - /* Enable WDT for sym and dc */ - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low); - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high); - /* Enable WDT for pke */ - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low); - ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high); + /* Enable watchdog timer for sym and dc */ + ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTL_OFFSET, ADF_SSMWDTH_OFFSET, timer_val); + + /* Enable watchdog timer for pke */ + ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, + timer_val_pke); } EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); @@ -265,18 +248,29 @@ static bool is_single_service(int service_id) case SVC_SYM: case SVC_ASYM: return true; - case SVC_CY: - case SVC_CY2: - case SVC_DCC: - case SVC_ASYM_DC: - case SVC_DC_ASYM: - case SVC_SYM_DC: - case SVC_DC_SYM: default: return false; } } +bool adf_gen4_services_supported(unsigned long mask) +{ + unsigned long num_svc = hweight_long(mask); + + if (mask >= BIT(SVC_BASE_COUNT)) + return false; + + switch (num_svc) { + case ADF_ONE_SERVICE: + return true; + case ADF_TWO_SERVICES: + return !test_bit(SVC_DCC, &mask); + default: + return false; + } +} +EXPORT_SYMBOL_GPL(adf_gen4_services_supported); + int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index e8c53bd76f1b..51fc2eaa263e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -179,5 +179,6 @@ int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number, struct bank_state *state); +bool adf_gen4_services_supported(unsigned long service_mask); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c index 2dd3772bf58a..0f7f00a19e7d 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -695,7 +695,7 
@@ static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev, if (err_mask->parerr_wat_wcp_mask) adf_poll_slicehang_csr(accel_dev, csr, ADF_GEN4_SLICEHANGSTATUS_WAT_WCP, - "ath_cph"); + "wat_wcp"); return false; } @@ -1043,63 +1043,16 @@ static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, return reset_required; } -static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, +static void adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, void __iomem *csr, u32 iastatssm) { - struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); - u32 reg; - if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) - return false; - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); - reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); - reg &= err_mask->parerr_ath_cph_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); - reg &= err_mask->parerr_cpr_xlt_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); - reg &= err_mask->parerr_dcpr_ucs_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); - } - - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); - reg &= err_mask->parerr_pke_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); - } - - if (err_mask->parerr_wat_wcp_mask) { - reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); - reg &= err_mask->parerr_wat_wcp_mask; - if (reg) { - ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); - ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, - reg); - } - } + return; + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); - return false; + return; } static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, @@ -1171,8 +1124,8 @@ static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); - reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + adf_handle_rf_parr_err(accel_dev, csr, iastatssm); ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 4fcd61ff70d1..6c39194647f0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -3,6 +3,7 @@ #include <linux/device.h> #include <linux/errno.h> #include <linux/pci.h> +#include <linux/string_choices.h> #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_cfg_services.h" @@ -19,14 +20,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct 
adf_accel_dev *accel_dev; - char *state; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; - state = adf_dev_started(accel_dev) ? "up" : "down"; - return sysfs_emit(buf, "%s\n", state); + return sysfs_emit(buf, "%s\n", str_up_down(adf_dev_started(accel_dev))); } static ssize_t state_store(struct device *dev, struct device_attribute *attr, @@ -117,25 +116,27 @@ static int adf_sysfs_update_dev_config(struct adf_accel_dev *accel_dev, static ssize_t cfg_services_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { }; struct adf_hw_device_data *hw_data; struct adf_accel_dev *accel_dev; int ret; - ret = sysfs_match_string(adf_cfg_services, buf); - if (ret < 0) - return ret; - accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; + ret = adf_parse_service_string(accel_dev, buf, count, services, + ADF_CFG_MAX_VAL_LEN_IN_BYTES); + if (ret) + return ret; + if (adf_dev_started(accel_dev)) { dev_info(dev, "Device qat_dev%d must be down to reconfigure the service.\n", accel_dev->accel_id); return -EINVAL; } - ret = adf_sysfs_update_dev_config(accel_dev, adf_cfg_services[ret]); + ret = adf_sysfs_update_dev_config(accel_dev, services); if (ret < 0) return ret; @@ -207,16 +208,13 @@ static DEVICE_ATTR_RW(pm_idle_enabled); static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr, char *buf) { - char *auto_reset; struct adf_accel_dev *accel_dev; accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); if (!accel_dev) return -EINVAL; - auto_reset = accel_dev->autoreset_on_error ? "on" : "off"; - - return sysfs_emit(buf, "%s\n", auto_reset); + return sysfs_emit(buf, "%s\n", str_on_off(accel_dev->autoreset_on_error)); } static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h index a03d43fef2b3..04f645957e28 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_comp.h @@ -16,8 +16,8 @@ enum icp_qat_fw_comp_20_cmd_id { ICP_QAT_FW_COMP_20_CMD_LZ4_DECOMPRESS = 4, ICP_QAT_FW_COMP_20_CMD_LZ4S_COMPRESS = 5, ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6, - ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7, - ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8, + ICP_QAT_FW_COMP_20_CMD_RESERVED_7 = 7, + ICP_QAT_FW_COMP_20_CMD_RESERVED_8 = 8, ICP_QAT_FW_COMP_20_CMD_RESERVED_9 = 9, ICP_QAT_FW_COMP_23_CMD_ZSTD_COMPRESS = 10, ICP_QAT_FW_COMP_23_CMD_ZSTD_DECOMPRESS = 11, diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index e28241bdd0f4..1c7bcd8e4055 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -43,7 +43,6 @@ #define ICP_QAT_SUOF_OBJS "SUF_OBJS" #define ICP_QAT_SUOF_IMAG "SUF_IMAG" #define ICP_QAT_SIMG_AE_INIT_SEQ_LEN (50 * sizeof(unsigned long long)) -#define ICP_QAT_SIMG_AE_INSTS_LEN (0x4000 * sizeof(unsigned long long)) #define DSS_FWSK_MODULUS_LEN 384 /* RSA3K */ #define DSS_FWSK_EXPONENT_LEN 4 @@ -75,13 +74,6 @@ DSS_SIGNATURE_LEN : \ CSS_SIGNATURE_LEN) -#define ICP_QAT_CSS_AE_IMG_LEN (sizeof(struct icp_qat_simg_ae_mode) + \ - ICP_QAT_SIMG_AE_INIT_SEQ_LEN + \ - ICP_QAT_SIMG_AE_INSTS_LEN) -#define ICP_QAT_CSS_AE_SIMG_LEN(handle) (sizeof(struct icp_qat_css_hdr) + \ - ICP_QAT_CSS_FWSK_PUB_LEN(handle) + 
\ - ICP_QAT_CSS_SIGNATURE_LEN(handle) + \ - ICP_QAT_CSS_AE_IMG_LEN) #define ICP_QAT_AE_IMG_OFFSET(handle) (sizeof(struct icp_qat_css_hdr) + \ ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + \ ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle) + \ @@ -404,8 +396,6 @@ struct icp_qat_suof_img_hdr { char *simg_buf; unsigned long simg_len; char *css_header; - char *css_key; - char *css_signature; char *css_simg; unsigned long simg_size; unsigned int ae_num; diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.c b/drivers/crypto/intel/qat/qat_common/qat_bl.c index 338acf29c487..5e4dad4693ca 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.c +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.c @@ -251,162 +251,3 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev, extra_dst_buff, sz_extra_dst_buff, sskip, dskip, flags); } - -static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev, - struct qat_alg_buf_list *bl) -{ - struct device *dev = &GET_DEV(accel_dev); - int n = bl->num_bufs; - int i; - - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bl->buffers[i].addr)) - dma_unmap_single(dev, bl->buffers[i].addr, - bl->buffers[i].len, DMA_FROM_DEVICE); -} - -static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev, - struct scatterlist *sgl, - struct qat_alg_buf_list **bl) -{ - struct device *dev = &GET_DEV(accel_dev); - struct qat_alg_buf_list *bufl; - int node = dev_to_node(dev); - struct scatterlist *sg; - int n, i, sg_nctr; - size_t sz; - - n = sg_nents(sgl); - sz = struct_size(bufl, buffers, n); - bufl = kzalloc_node(sz, GFP_KERNEL, node); - if (unlikely(!bufl)) - return -ENOMEM; - - for (i = 0; i < n; i++) - bufl->buffers[i].addr = DMA_MAPPING_ERROR; - - sg_nctr = 0; - for_each_sg(sgl, sg, n, i) { - int y = sg_nctr; - - if (!sg->length) - continue; - - bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg), - sg->length, - DMA_FROM_DEVICE); - bufl->buffers[y].len = sg->length; - if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr))) - goto err_map; - sg_nctr++; - } - bufl->num_bufs = sg_nctr; - bufl->num_mapped_bufs = sg_nctr; - - *bl = bufl; - - return 0; - -err_map: - for (i = 0; i < n; i++) - if (!dma_mapping_error(dev, bufl->buffers[i].addr)) - dma_unmap_single(dev, bufl->buffers[i].addr, - bufl->buffers[i].len, - DMA_FROM_DEVICE); - kfree(bufl); - *bl = NULL; - - return -ENOMEM; -} - -static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev, - struct scatterlist *sgl, - struct qat_alg_buf_list *bl, - bool free_bl) -{ - if (bl) { - qat_bl_sgl_unmap(accel_dev, bl); - - if (free_bl) - kfree(bl); - } - if (sgl) - sgl_free(sgl); -} - -static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev, - struct scatterlist **sgl, - struct qat_alg_buf_list **bl, - unsigned int dlen, - gfp_t gfp) -{ - struct scatterlist *dst; - int ret; - - dst = sgl_alloc(dlen, gfp, NULL); - if (!dst) { - dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n"); - return -ENOMEM; - } - - ret = qat_bl_sgl_map(accel_dev, dst, bl); - if (ret) - goto err; - - *sgl = dst; - - return 0; - -err: - sgl_free(dst); - *sgl = NULL; - return ret; -} - -int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, - struct scatterlist **sg, - unsigned int dlen, - struct qat_request_buffs *qat_bufs, - gfp_t gfp) -{ - struct device *dev = &GET_DEV(accel_dev); - dma_addr_t new_blp = DMA_MAPPING_ERROR; - struct qat_alg_buf_list *new_bl; - struct scatterlist *new_sg; - size_t new_bl_size; - int ret; - - ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp); - if (ret) - return ret; - - new_bl_size = 
struct_size(new_bl, buffers, new_bl->num_bufs); - - /* Map new firmware SGL descriptor */ - new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, new_blp))) - goto err; - - /* Unmap old firmware SGL descriptor */ - dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE); - - /* Free and unmap old scatterlist */ - qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout, - !qat_bufs->sgl_dst_valid); - - qat_bufs->sgl_dst_valid = false; - qat_bufs->blout = new_bl; - qat_bufs->bloutp = new_blp; - qat_bufs->sz_out = new_bl_size; - - *sg = new_sg; - - return 0; -err: - qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true); - - if (!dma_mapping_error(dev, new_blp)) - dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE); - - return -ENOMEM; -} diff --git a/drivers/crypto/intel/qat/qat_common/qat_bl.h b/drivers/crypto/intel/qat/qat_common/qat_bl.h index 3f5b79015400..2827d5055d3c 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_bl.h +++ b/drivers/crypto/intel/qat/qat_common/qat_bl.h @@ -65,10 +65,4 @@ static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req) return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; } -int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev, - struct scatterlist **newd, - unsigned int dlen, - struct qat_request_buffs *qat_bufs, - gfp_t gfp); - #endif diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index 2ba4aa22e092..a6e02405d402 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -29,11 +29,6 @@ struct qat_compression_ctx { int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp); }; -struct qat_dst { - bool is_null; - int resubmitted; -}; - struct qat_compression_req { u8 req[QAT_COMP_REQ_SIZE]; struct qat_compression_ctx *qat_compression_ctx; @@ -42,8 +37,6 @@ struct qat_compression_req { enum direction dir; int actual_dlen; struct qat_alg_req alg_req; - struct work_struct resubmit; - struct qat_dst dst; }; static int qat_alg_send_dc_message(struct qat_compression_req *qat_req, @@ -60,46 +53,6 @@ static int qat_alg_send_dc_message(struct qat_compression_req *qat_req, return qat_alg_send_message(alg_req); } -static void qat_comp_resubmit(struct work_struct *work) -{ - struct qat_compression_req *qat_req = - container_of(work, struct qat_compression_req, resubmit); - struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx; - struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; - struct qat_request_buffs *qat_bufs = &qat_req->buf; - struct qat_compression_instance *inst = ctx->inst; - struct acomp_req *areq = qat_req->acompress_req; - struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq); - unsigned int dlen = CRYPTO_ACOMP_DST_MAX; - u8 *req = qat_req->req; - dma_addr_t dfbuf; - int ret; - - areq->dlen = dlen; - - dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n", - crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)), - qat_req->dir == COMPRESSION ? 
"comp" : "decomp", dlen); - - ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs, - qat_algs_alloc_flags(&areq->base)); - if (ret) - goto err; - - qat_req->dst.resubmitted = true; - - dfbuf = qat_req->buf.bloutp; - qat_comp_override_dst(req, dfbuf, dlen); - - ret = qat_alg_send_dc_message(qat_req, inst, &areq->base); - if (ret != -ENOSPC) - return; - -err: - qat_bl_free_bufl(accel_dev, qat_bufs); - acomp_request_complete(areq, ret); -} - static void qat_comp_generic_callback(struct qat_compression_req *qat_req, void *resp) { @@ -131,21 +84,6 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req, areq->dlen = 0; - if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) { - if (cmp_err == ERR_CODE_OVERFLOW_ERROR) { - if (qat_req->dst.resubmitted) { - dev_dbg(&GET_DEV(accel_dev), - "Output does not fit destination buffer\n"); - res = -EOVERFLOW; - goto end; - } - - INIT_WORK(&qat_req->resubmit, qat_comp_resubmit); - adf_misc_wq_queue_work(&qat_req->resubmit); - return; - } - } - if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK)) goto end; @@ -245,29 +183,9 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum directi if (!areq->src || !slen) return -EINVAL; - if (areq->dst && !dlen) + if (!areq->dst || !dlen) return -EINVAL; - qat_req->dst.is_null = false; - - /* Handle acomp requests that require the allocation of a destination - * buffer. The size of the destination buffer is double the source - * buffer (rounded up to the size of a page) to fit the decompressed - * output or an expansion on the data for compression. - */ - if (!areq->dst) { - qat_req->dst.is_null = true; - - dlen = round_up(2 * slen, PAGE_SIZE); - areq->dst = sgl_alloc(dlen, f, NULL); - if (!areq->dst) - return -ENOMEM; - - dlen -= dhdr + dftr; - areq->dlen = dlen; - qat_req->dst.resubmitted = false; - } - if (dir == COMPRESSION) { params.extra_dst_buff = inst->dc_data->ovf_buff_p; ovf_buff_sz = inst->dc_data->ovf_buff_sz; @@ -329,7 +247,6 @@ static struct acomp_alg qat_acomp[] = { { .exit = qat_comp_alg_exit_tfm, .compress = qat_comp_alg_compress, .decompress = qat_comp_alg_decompress, - .dst_free = sgl_free, .reqsize = sizeof(struct qat_compression_req), }}; diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h index 404e32c5e778..18a1f33a6db9 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_req.h +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_req.h @@ -25,16 +25,6 @@ static inline void qat_comp_create_req(void *ctx, void *req, u64 src, u32 slen, req_pars->out_buffer_sz = dlen; } -static inline void qat_comp_override_dst(void *req, u64 dst, u32 dlen) -{ - struct icp_qat_fw_comp_req *fw_req = req; - struct icp_qat_fw_comp_req_params *req_pars = &fw_req->comp_pars; - - fw_req->comn_mid.dest_data_addr = dst; - fw_req->comn_mid.dst_length = dlen; - req_pars->out_buffer_sz = dlen; -} - static inline void qat_comp_create_compression_req(void *ctx, void *req, u64 src, u32 slen, u64 dst, u32 dlen, diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 7ea40b4f6e5b..7678a93c6853 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2020 Intel Corporation */ +#include <linux/align.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/kernel.h> @@ -1064,6 
+1065,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) { struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; + unsigned int offset = ICP_QAT_AE_IMG_OFFSET(handle); struct icp_qat_simg_ae_mode *ae_mode; struct icp_qat_suof_objhdr *suof_objhdr; @@ -1075,13 +1077,7 @@ static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, suof_chunk_hdr->offset))->img_length; suof_img_hdr->css_header = suof_img_hdr->simg_buf; - suof_img_hdr->css_key = (suof_img_hdr->css_header + - sizeof(struct icp_qat_css_hdr)); - suof_img_hdr->css_signature = suof_img_hdr->css_key + - ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + - ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); - suof_img_hdr->css_simg = suof_img_hdr->css_signature + - ICP_QAT_CSS_SIGNATURE_LEN(handle); + suof_img_hdr->css_simg = suof_img_hdr->css_header + offset; ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); suof_img_hdr->ae_mask = ae_mode->ae_mask; @@ -1419,20 +1415,21 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, struct icp_qat_fw_auth_desc *auth_desc; struct icp_qat_auth_chunk *auth_chunk; u64 virt_addr, bus_addr, virt_base; - unsigned int length, simg_offset = sizeof(*auth_chunk); + unsigned int simg_offset = sizeof(*auth_chunk); struct icp_qat_simg_ae_mode *simg_ae_mode; struct icp_firml_dram_desc img_desc; + int ret; - if (size > (ICP_QAT_AE_IMG_OFFSET(handle) + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)) { - pr_err("QAT: error, input image size overflow %d\n", size); - return -EINVAL; - } - length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? - ICP_QAT_CSS_AE_SIMG_LEN(handle) + simg_offset : - size + ICP_QAT_CSS_FWSK_PAD_LEN(handle) + simg_offset; - if (qat_uclo_simg_alloc(handle, &img_desc, length)) { + ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); + if (ret) { pr_err("QAT: error, allocate continuous dram fail\n"); - return -ENOMEM; + return ret; + } + + if (!IS_ALIGNED(img_desc.dram_size, 8) || !img_desc.dram_bus_addr) { + pr_debug("QAT: invalid address\n"); + qat_uclo_simg_free(handle, &img_desc); + return -EINVAL; } auth_chunk = img_desc.dram_base_addr_v; @@ -1490,6 +1487,13 @@ static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); auth_desc->img_low = (unsigned int)bus_addr; auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(handle); + if (bus_addr + auth_desc->img_len > img_desc.dram_bus_addr + + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { + pr_err("QAT: insufficient memory size for authentication data\n"); + qat_uclo_simg_free(handle, &img_desc); + return -ENOMEM; + } + memcpy((void *)(uintptr_t)virt_addr, (void *)(image + ICP_QAT_AE_IMG_OFFSET(handle)), auth_desc->img_len); diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile index cfd3bd757715..5bf5c890c362 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xcc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o -qat_dh895xcc-objs := adf_drv.o adf_dh895xcc_hw_data.o +qat_dh895xcc-y := adf_drv.o adf_dh895xcc_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c index c0661ff5e929..e48bcf1818cd 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c 
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -29,7 +29,7 @@ static struct adf_hw_device_class dh895xcc_class = { static u32 get_accel_mask(struct adf_hw_device_data *self) { - u32 fuses = self->fuses; + u32 fuses = self->fuses[ADF_FUSECTL0]; return ~fuses >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET & ADF_DH895XCC_ACCELERATORS_MASK; @@ -37,7 +37,7 @@ static u32 get_accel_mask(struct adf_hw_device_data *self) static u32 get_ae_mask(struct adf_hw_device_data *self) { - u32 fuses = self->fuses; + u32 fuses = self->fuses[ADF_FUSECTL0]; return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK; } @@ -99,7 +99,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) static enum dev_sku_info get_sku(struct adf_hw_device_data *self) { - int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK) + int sku = (self->fuses[ADF_FUSECTL0] & ADF_DH895XCC_FUSECTL_SKU_MASK) >> ADF_DH895XCC_FUSECTL_SKU_SHIFT; switch (sku) { diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 3137fc3b5cf6..07e9d7e52861 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -126,7 +126,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_dh895xcc(accel_dev->hw_device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET, - &hw_data->fuses); + &hw_data->fuses[ADF_FUSECTL0]); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile index 64b54e92b2b4..93f9c81edf09 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only ccflags-y := -I $(src)/../qat_common obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf.o -qat_dh895xccvf-objs := adf_drv.o adf_dh895xccvf_hw_data.o +qat_dh895xccvf-y := adf_drv.o adf_dh895xccvf_hw_data.o diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig index 4c25a78ab3ed..aa269abb0499 100644 --- a/drivers/crypto/marvell/Kconfig +++ b/drivers/crypto/marvell/Kconfig @@ -24,7 +24,7 @@ config CRYPTO_DEV_OCTEONTX_CPT tristate "Support for Marvell OcteonTX CPT driver" depends on ARCH_THUNDER || COMPILE_TEST depends on PCI_MSI && 64BIT - depends on CRYPTO_LIB_AES + select CRYPTO_LIB_AES select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD @@ -41,10 +41,10 @@ config CRYPTO_DEV_OCTEONTX2_CPT tristate "Marvell OcteonTX2 CPT driver" depends on ARCH_THUNDER2 || COMPILE_TEST depends on PCI_MSI && 64BIT - depends on CRYPTO_LIB_AES depends on NET_VENDOR_MARVELL select OCTEONTX2_MBOX select CRYPTO_DEV_MARVELL + select CRYPTO_LIB_AES select CRYPTO_SKCIPHER select CRYPTO_HASH select CRYPTO_AEAD diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c index c4250e5fcf8f..9f5601c0280b 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.c @@ -10,6 +10,7 @@ #include <linux/ctype.h> #include <linux/firmware.h> +#include <linux/string_choices.h> #include "otx_cpt_common.h" #include "otx_cptpf_ucode.h" #include "otx_cptpf.h" @@ -505,17 +506,6 @@ int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type) } 
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
 
-int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
-				 int eng_type)
-{
-	struct otx_cpt_engs_rsvd *engs;
-
-	engs = find_engines_by_type(eng_grp, eng_type);
-
-	return (engs != NULL ? 1 : 0);
-}
-EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);
-
 static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
			     char *buf, int size)
 {
@@ -614,8 +604,8 @@ static void print_dbg_info(struct device *dev,
 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
 		grp = &eng_grps->grp[i];
 
-		pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
-			 "enabled" : "disabled");
+		pr_debug("engine_group%d, state %s\n", i,
+			 str_enabled_disabled(grp->is_enabled));
 		if (grp->is_enabled) {
 			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
 			pr_debug("Ucode0 filename %s, version %s\n",
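
The print_dbg_info() hunk above swaps an open-coded ternary for str_enabled_disabled() from <linux/string_choices.h> (the include is added in this file earlier in the diff), which returns the string "enabled" or "disabled" for a bool. A minimal sketch of the pattern; example_log_state() and its arguments are hypothetical, not part of this series::

    #include <linux/printk.h>
    #include <linux/string_choices.h>
    #include <linux/types.h>

    /* Hypothetical helper: log a boolean state via the string_choices
     * helper rather than an open-coded `cond ? "enabled" : "disabled"`.
     */
    static void example_log_state(int group, bool is_enabled)
    {
            pr_debug("engine_group%d, state %s\n", group,
                     str_enabled_disabled(is_enabled));
    }

diff --git a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
index 8620ac87a447..df79ee416c0d 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx/otx_cptpf_ucode.h
@@ -174,7 +174,5 @@ int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
 void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
				    bool is_rdonly);
 int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type);
-int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
-				 int eng_type);
 
 #endif /* __OTX_CPTPF_UCODE_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 5c9484646172..42c5484ce66a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -3,6 +3,7 @@
 
 #include <linux/ctype.h>
 #include <linux/firmware.h>
+#include <linux/string_choices.h>
 #include "otx2_cptpf_ucode.h"
 #include "otx2_cpt_common.h"
 #include "otx2_cptpf.h"
@@ -1774,102 +1775,3 @@ err_print:
 	dev_err(dev, "%s\n", err_msg);
 	return -EINVAL;
 }
-
-static void get_engs_info(struct otx2_cpt_eng_grp_info *eng_grp, char *buf,
-			  int size, int idx)
-{
-	struct otx2_cpt_engs_rsvd *mirrored_engs = NULL;
-	struct otx2_cpt_engs_rsvd *engs;
-	int len, i;
-
-	buf[0] = '\0';
-	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
-		engs = &eng_grp->engs[i];
-		if (!engs->type)
-			continue;
-		if (idx != -1 && idx != i)
-			continue;
-
-		if (eng_grp->mirror.is_ena)
-			mirrored_engs = find_engines_by_type(
-				&eng_grp->g->grp[eng_grp->mirror.idx],
-				engs->type);
-		if (i > 0 && idx == -1) {
-			len = strlen(buf);
-			scnprintf(buf + len, size - len, ", ");
-		}
-
-		len = strlen(buf);
-		scnprintf(buf + len, size - len, "%d %s ",
-			  mirrored_engs ? engs->count + mirrored_engs->count :
-					  engs->count,
-			  get_eng_type_str(engs->type));
-		if (mirrored_engs) {
-			len = strlen(buf);
-			scnprintf(buf + len, size - len,
-				  "(%d shared with engine_group%d) ",
-				  engs->count <= 0 ?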
- engs->count + mirrored_engs->count : - mirrored_engs->count, - eng_grp->mirror.idx); - } - } -} - -void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf) -{ - struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps; - struct otx2_cpt_eng_grp_info *mirrored_grp; - char engs_info[2 * OTX2_CPT_NAME_LENGTH]; - struct otx2_cpt_eng_grp_info *grp; - struct otx2_cpt_engs_rsvd *engs; - int i, j; - - pr_debug("Engine groups global info"); - pr_debug("max SE %d, max IE %d, max AE %d", eng_grps->avail.max_se_cnt, - eng_grps->avail.max_ie_cnt, eng_grps->avail.max_ae_cnt); - pr_debug("free SE %d", eng_grps->avail.se_cnt); - pr_debug("free IE %d", eng_grps->avail.ie_cnt); - pr_debug("free AE %d", eng_grps->avail.ae_cnt); - - for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) { - grp = &eng_grps->grp[i]; - pr_debug("engine_group%d, state %s", i, - grp->is_enabled ? "enabled" : "disabled"); - if (grp->is_enabled) { - mirrored_grp = &eng_grps->grp[grp->mirror.idx]; - pr_debug("Ucode0 filename %s, version %s", - grp->mirror.is_ena ? - mirrored_grp->ucode[0].filename : - grp->ucode[0].filename, - grp->mirror.is_ena ? - mirrored_grp->ucode[0].ver_str : - grp->ucode[0].ver_str); - if (is_2nd_ucode_used(grp)) - pr_debug("Ucode1 filename %s, version %s", - grp->ucode[1].filename, - grp->ucode[1].ver_str); - } - - for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) { - engs = &grp->engs[j]; - if (engs->type) { - u32 mask[5] = { }; - - get_engs_info(grp, engs_info, - 2 * OTX2_CPT_NAME_LENGTH, j); - pr_debug("Slot%d: %s", j, engs_info); - bitmap_to_arr32(mask, engs->bmap, - eng_grps->engs_num); - if (is_dev_otx2(cptpf->pdev)) - pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x", - mask[3], mask[2], mask[1], - mask[0]); - else - pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x", - mask[4], mask[3], mask[2], mask[1], - mask[0]); - } - } - } -} diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h index 365fe8943bd9..7e6a6a4ec37c 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h @@ -166,7 +166,6 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, struct devlink_param_gset_ctx *ctx); int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf, struct devlink_param_gset_ctx *ctx); -void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf); struct otx2_cpt_engs_rsvd *find_engines_by_type( struct otx2_cpt_eng_grp_info *eng_grp, int eng_type); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c index 5387c68f3c9d..426244107037 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c @@ -264,9 +264,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs, break; } - dev_err(&pdev->dev, - "Request failed with software error code 0x%x\n", - cpt_status->s.uc_compcode); + pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n", + cpt_status->s.uc_compcode, + info->req->areq->tfm->__crt_alg->cra_name, + info->req->areq->tfm->__crt_alg->cra_driver_name); otx2_cpt_dump_sg_list(pdev, info->req); break; } diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c index d94a26c3541a..133ebc998236 100644 --- a/drivers/crypto/mxs-dcp.c +++ b/drivers/crypto/mxs-dcp.c @@ -265,12 +265,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx, MXS_DCP_CONTROL0_INTERRUPT | MXS_DCP_CONTROL0_ENABLE_CIPHER; - if 
(key_referenced) - /* Set OTP key bit to select the key via KEY_SELECT. */ - desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; - else + if (!key_referenced) /* Payload contains the key. */ desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY; + else if (actx->key[0] == DCP_PAES_KEY_OTP) + /* Set OTP key bit to select the key via KEY_SELECT. */ + desc->control0 |= MXS_DCP_CONTROL0_OTP_KEY; if (rctx->enc) desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT; diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c index 82214cde2bcd..b950fcce8a9b 100644 --- a/drivers/crypto/nx/nx-842.c +++ b/drivers/crypto/nx/nx-842.c @@ -101,9 +101,13 @@ static int update_param(struct nx842_crypto_param *p, return 0; } -int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) +void *nx842_crypto_alloc_ctx(struct nx842_driver *driver) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); spin_lock_init(&ctx->lock); ctx->driver = driver; @@ -114,22 +118,23 @@ int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) kfree(ctx->wmem); free_page((unsigned long)ctx->sbounce); free_page((unsigned long)ctx->dbounce); - return -ENOMEM; + kfree(ctx); + return ERR_PTR(-ENOMEM); } - return 0; + return ctx; } -EXPORT_SYMBOL_GPL(nx842_crypto_init); +EXPORT_SYMBOL_GPL(nx842_crypto_alloc_ctx); -void nx842_crypto_exit(struct crypto_tfm *tfm) +void nx842_crypto_free_ctx(void *p) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx = p; kfree(ctx->wmem); free_page((unsigned long)ctx->sbounce); free_page((unsigned long)ctx->dbounce); } -EXPORT_SYMBOL_GPL(nx842_crypto_exit); +EXPORT_SYMBOL_GPL(nx842_crypto_free_ctx); static void check_constraints(struct nx842_constraints *c) { @@ -246,11 +251,11 @@ nospc: return update_param(p, slen, dskip + dlen); } -int nx842_crypto_compress(struct crypto_tfm *tfm, +int nx842_crypto_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) + u8 *dst, unsigned int *dlen, void *pctx) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx = pctx; struct nx842_crypto_header *hdr = container_of(&ctx->header, struct nx842_crypto_header, hdr); @@ -431,11 +436,11 @@ usesw: return update_param(p, slen + padding, dlen); } -int nx842_crypto_decompress(struct crypto_tfm *tfm, +int nx842_crypto_decompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen) + u8 *dst, unsigned int *dlen, void *pctx) { - struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); + struct nx842_crypto_ctx *ctx = pctx; struct nx842_crypto_header *hdr; struct nx842_crypto_param p; struct nx842_constraints c = *ctx->driver->constraints; diff --git a/drivers/crypto/nx/nx-842.h b/drivers/crypto/nx/nx-842.h index 887d4ce3cb49..f5e2c82ba876 100644 --- a/drivers/crypto/nx/nx-842.h +++ b/drivers/crypto/nx/nx-842.h @@ -3,7 +3,6 @@ #ifndef __NX_842_H__ #define __NX_842_H__ -#include <crypto/algapi.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> @@ -101,6 +100,8 @@ #define LEN_ON_SIZE(pa, size) ((size) - ((pa) & ((size) - 1))) #define LEN_ON_PAGE(pa) LEN_ON_SIZE(pa, PAGE_SIZE) +struct crypto_scomp; + static inline unsigned long nx842_get_pa(void *addr) { if (!is_vmalloc_addr(addr)) @@ -182,13 +183,13 @@ struct nx842_crypto_ctx { struct nx842_driver *driver; }; -int nx842_crypto_init(struct crypto_tfm *tfm, struct 
nx842_driver *driver); -void nx842_crypto_exit(struct crypto_tfm *tfm); -int nx842_crypto_compress(struct crypto_tfm *tfm, +void *nx842_crypto_alloc_ctx(struct nx842_driver *driver); +void nx842_crypto_free_ctx(void *ctx); +int nx842_crypto_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); -int nx842_crypto_decompress(struct crypto_tfm *tfm, + u8 *dst, unsigned int *dlen, void *ctx); +int nx842_crypto_decompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); + u8 *dst, unsigned int *dlen, void *ctx); #endif /* __NX_842_H__ */ diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index c843f4c6f684..56a0b3a67c33 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c @@ -217,13 +217,11 @@ static int generate_pat(u8 *iv, memset(b1, 0, 16); if (assoclen <= 65280) { *(u16 *)b1 = assoclen; - scatterwalk_map_and_copy(b1 + 2, req->src, 0, - iauth_len, SCATTERWALK_FROM_SG); + memcpy_from_sglist(b1 + 2, req->src, 0, iauth_len); } else { *(u16 *)b1 = (u16)(0xfffe); *(u32 *)&b1[2] = assoclen; - scatterwalk_map_and_copy(b1 + 6, req->src, 0, - iauth_len, SCATTERWALK_FROM_SG); + memcpy_from_sglist(b1 + 6, req->src, 0, iauth_len); } } @@ -341,9 +339,8 @@ static int ccm_nx_decrypt(struct aead_request *req, nbytes -= authsize; /* copy out the auth tag to compare with later */ - scatterwalk_map_and_copy(priv->oauth_tag, - req->src, nbytes + req->assoclen, authsize, - SCATTERWALK_FROM_SG); + memcpy_from_sglist(priv->oauth_tag, req->src, nbytes + req->assoclen, + authsize); rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen, csbcpb->cpb.aes_ccm.in_pat_or_b0); @@ -465,9 +462,8 @@ static int ccm_nx_encrypt(struct aead_request *req, } while (processed < nbytes); /* copy out the auth tag */ - scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, - req->dst, nbytes + req->assoclen, authsize, - SCATTERWALK_TO_SG); + memcpy_to_sglist(req->dst, nbytes + req->assoclen, + csbcpb->cpb.aes_ccm.out_pat_or_mac, authsize); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index 4a796318b430..b7fe2de96d96 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c @@ -103,16 +103,13 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx, { int rc; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; - struct scatter_walk walk; struct nx_sg *nx_sg = nx_ctx->in_sg; unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; if (nbytes <= AES_BLOCK_SIZE) { - scatterwalk_start(&walk, req->src); - scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); - scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); + memcpy_from_sglist(out, req->src, 0, nbytes); return 0; } @@ -391,19 +388,17 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc, mac: if (enc) { /* copy out the auth tag */ - scatterwalk_map_and_copy( - csbcpb->cpb.aes_gcm.out_pat_or_mac, + memcpy_to_sglist( req->dst, req->assoclen + nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_TO_SG); + csbcpb->cpb.aes_gcm.out_pat_or_mac, + crypto_aead_authsize(crypto_aead_reqtfm(req))); } else { u8 *itag = nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; - scatterwalk_map_and_copy( + memcpy_from_sglist( itag, req->src, req->assoclen + nbytes, - crypto_aead_authsize(crypto_aead_reqtfm(req)), - SCATTERWALK_FROM_SG); + 
crypto_aead_authsize(crypto_aead_reqtfm(req)));
 
 		rc = crypto_memneq(itag, otag,
				   crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index 8c859872c183..fd0a98b2fb1b 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -9,6 +9,7 @@
 
 #include "nx-842.h"
 
+#include <crypto/internal/scompress.h>
 #include <linux/timer.h>
 
 #include <asm/prom.h>
@@ -1031,23 +1032,21 @@ static struct nx842_driver nx842_powernv_driver = {
 	.decompress =	nx842_powernv_decompress,
 };
 
-static int nx842_powernv_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_powernv_crypto_alloc_ctx(void)
 {
-	return nx842_crypto_init(tfm, &nx842_powernv_driver);
+	return nx842_crypto_alloc_ctx(&nx842_powernv_driver);
 }
 
-static struct crypto_alg nx842_powernv_alg = {
-	.cra_name		= "842",
-	.cra_driver_name	= "842-nx",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= nx842_powernv_crypto_init,
-	.cra_exit		= nx842_crypto_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= nx842_crypto_compress,
-	.coa_decompress		= nx842_crypto_decompress } }
+static struct scomp_alg nx842_powernv_alg = {
+	.base.cra_name		= "842",
+	.base.cra_driver_name	= "842-nx",
+	.base.cra_priority	= 300,
+	.base.cra_module	= THIS_MODULE,
+
+	.alloc_ctx		= nx842_powernv_crypto_alloc_ctx,
+	.free_ctx		= nx842_crypto_free_ctx,
+	.compress		= nx842_crypto_compress,
+	.decompress		= nx842_crypto_decompress,
 };
 
 static __init int nx_compress_powernv_init(void)
@@ -1107,7 +1106,7 @@
 		nx842_powernv_exec = nx842_exec_vas;
 	}
 
-	ret = crypto_register_alg(&nx842_powernv_alg);
+	ret = crypto_register_scomp(&nx842_powernv_alg);
 	if (ret) {
 		nx_delete_coprocs();
 		return ret;
@@ -1128,7 +1127,7 @@ static void __exit nx_compress_powernv_exit(void)
 	if (!nx842_ct)
 		vas_unregister_api_powernv();
 
-	crypto_unregister_alg(&nx842_powernv_alg);
+	crypto_unregister_scomp(&nx842_powernv_alg);
 
 	nx_delete_coprocs();
 }
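
This converts the powernv 842 backend (and, below, the pseries one) from the legacy CRYPTO_ALG_TYPE_COMPRESS crypto_alg to the synchronous-compression (scomp) interface: per-transform state no longer lives in the tfm via cra_init/cra_exit, but in a context returned by ->alloc_ctx() and handed back to every ->compress()/->decompress() call. A minimal sketch of a driver under that interface, assuming only the scomp_alg fields and signatures used in this patch; all my_* names are hypothetical stand-ins::

    #include <crypto/internal/scompress.h>
    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    /* Hypothetical per-context scratch state; a real driver would keep its
     * working buffers here instead of in the tfm context.
     */
    struct my_ctx {
            void *wmem;
    };

    static void *my_alloc_ctx(void)
    {
            struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            return ctx ? ctx : ERR_PTR(-ENOMEM);
    }

    static void my_free_ctx(void *ctx)
    {
            kfree(ctx);
    }

    static int my_compress(struct crypto_scomp *tfm, const u8 *src,
                           unsigned int slen, u8 *dst, unsigned int *dlen,
                           void *ctx)
    {
            return -EOPNOTSUPP;     /* real (de)compression goes here */
    }

    static int my_decompress(struct crypto_scomp *tfm, const u8 *src,
                             unsigned int slen, u8 *dst, unsigned int *dlen,
                             void *ctx)
    {
            return -EOPNOTSUPP;
    }

    static struct scomp_alg my_scomp_alg = {
            .base.cra_name          = "842",
            .base.cra_driver_name   = "842-example",
            .base.cra_priority      = 100,
            .base.cra_module        = THIS_MODULE,

            .alloc_ctx              = my_alloc_ctx,
            .free_ctx               = my_free_ctx,
            .compress               = my_compress,
            .decompress             = my_decompress,
    };

Registration then goes through crypto_register_scomp()/crypto_unregister_scomp(), as the init/exit hunks above show.

diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 1660c5cf3641..f528e072494a 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -11,6 +11,7 @@
 #include <asm/vio.h>
 #include <asm/hvcall.h>
 #include <asm/vas.h>
+#include <crypto/internal/scompress.h>
 
 #include "nx-842.h"
 #include "nx_csbcpb.h" /* struct nx_csbcpb */
@@ -1008,23 +1009,21 @@ static struct nx842_driver nx842_pseries_driver = {
 	.decompress =	nx842_pseries_decompress,
 };
 
-static int nx842_pseries_crypto_init(struct crypto_tfm *tfm)
+static void *nx842_pseries_crypto_alloc_ctx(void)
 {
-	return nx842_crypto_init(tfm, &nx842_pseries_driver);
+	return nx842_crypto_alloc_ctx(&nx842_pseries_driver);
 }
 
-static struct crypto_alg nx842_pseries_alg = {
-	.cra_name		= "842",
-	.cra_driver_name	= "842-nx",
-	.cra_priority		= 300,
-	.cra_flags		= CRYPTO_ALG_TYPE_COMPRESS,
-	.cra_ctxsize		= sizeof(struct nx842_crypto_ctx),
-	.cra_module		= THIS_MODULE,
-	.cra_init		= nx842_pseries_crypto_init,
-	.cra_exit		= nx842_crypto_exit,
-	.cra_u			= { .compress = {
-	.coa_compress		= nx842_crypto_compress,
-	.coa_decompress		= nx842_crypto_decompress } }
+static struct scomp_alg nx842_pseries_alg = {
+	.base.cra_name		= "842",
+	.base.cra_driver_name	= "842-nx",
+	.base.cra_priority	= 300,
+	.base.cra_module	= THIS_MODULE,
+
+	.alloc_ctx		= nx842_pseries_crypto_alloc_ctx,
+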
.free_ctx = nx842_crypto_free_ctx, + .compress = nx842_crypto_compress, + .decompress = nx842_crypto_decompress, }; static int nx842_probe(struct vio_dev *viodev, @@ -1072,7 +1071,7 @@ static int nx842_probe(struct vio_dev *viodev, if (ret) goto error; - ret = crypto_register_alg(&nx842_pseries_alg); + ret = crypto_register_scomp(&nx842_pseries_alg); if (ret) { dev_err(&viodev->dev, "could not register comp alg: %d\n", ret); goto error; @@ -1120,7 +1119,7 @@ static void nx842_remove(struct vio_dev *viodev) if (caps_feat) sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group); - crypto_unregister_alg(&nx842_pseries_alg); + crypto_unregister_scomp(&nx842_pseries_alg); of_reconfig_notifier_unregister(&nx842_of_nb); @@ -1145,6 +1144,7 @@ static void __init nxcop_get_capabilities(void) { struct hv_vas_all_caps *hv_caps; struct hv_nx_cop_caps *hv_nxc; + u64 feat; int rc; hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL); @@ -1155,27 +1155,26 @@ static void __init nxcop_get_capabilities(void) */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0, (u64)virt_to_phys(hv_caps)); + if (!rc) + feat = be64_to_cpu(hv_caps->feat_type); + kfree(hv_caps); if (rc) - goto out; + return; + if (!(feat & VAS_NX_GZIP_FEAT_BIT)) + return; - caps_feat = be64_to_cpu(hv_caps->feat_type); /* * NX-GZIP feature available */ - if (caps_feat & VAS_NX_GZIP_FEAT_BIT) { - hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); - if (!hv_nxc) - goto out; - /* - * Get capabilities for NX-GZIP feature - */ - rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, - VAS_NX_GZIP_FEAT, - (u64)virt_to_phys(hv_nxc)); - } else { - pr_err("NX-GZIP feature is not available\n"); - rc = -EINVAL; - } + hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); + if (!hv_nxc) + return; + /* + * Get capabilities for NX-GZIP feature + */ + rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, + VAS_NX_GZIP_FEAT, + (u64)virt_to_phys(hv_nxc)); if (!rc) { nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor); @@ -1185,13 +1184,10 @@ static void __init nxcop_get_capabilities(void) be64_to_cpu(hv_nxc->min_compress_len); nx_cop_caps.min_decompress_len = be64_to_cpu(hv_nxc->min_decompress_len); - } else { - caps_feat = 0; + caps_feat = feat; } kfree(hv_nxc); -out: - kfree(hv_caps); } static const struct vio_device_id nx842_vio_driver_ids[] = { @@ -1256,7 +1252,7 @@ static void __exit nx842_pseries_exit(void) vas_unregister_api_pseries(); - crypto_unregister_alg(&nx842_pseries_alg); + crypto_unregister_scomp(&nx842_pseries_alg); spin_lock_irqsave(&devdata_spinlock, flags); old_devdata = rcu_dereference_check(devdata, diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c index 010e87d9da36..a3b979193d9b 100644 --- a/drivers/crypto/nx/nx.c +++ b/drivers/crypto/nx/nx.c @@ -153,40 +153,18 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, { struct scatter_walk walk; struct nx_sg *nx_sg = nx_dst; - unsigned int n, offset = 0, len = *src_len; - char *dst; + unsigned int n, len = *src_len; /* we need to fast forward through @start bytes first */ - for (;;) { - scatterwalk_start(&walk, sg_src); - - if (start < offset + sg_src->length) - break; - - offset += sg_src->length; - sg_src = sg_next(sg_src); - } - - /* start - offset is the number of bytes to advance in the scatterlist - * element we're currently looking at */ - scatterwalk_advance(&walk, start - offset); + scatterwalk_start_at_pos(&walk, sg_src, start); while (len && (nx_sg - nx_dst) < sglen) { - n = scatterwalk_clamp(&walk, len); - if (!n) { - /* In cases where we have scatterlist chain 
sg_next
-		 * handles with it properly */
-			scatterwalk_start(&walk, sg_next(walk.sg));
-			n = scatterwalk_clamp(&walk, len);
-		}
-		dst = scatterwalk_map(&walk);
+		n = scatterwalk_next(&walk, len);
 
-		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
-		len -= n;
+		nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n, sglen - (nx_sg - nx_dst));
 
-		scatterwalk_unmap(dst);
-		scatterwalk_advance(&walk, n);
-		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
+		scatterwalk_done_src(&walk, n);
+		len -= n;
 	}
 
 	/* update to_process */
 	*src_len -= len;
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 2697baebb6a3..e1b4b6927bec 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -189,7 +189,4 @@ extern struct shash_alg nx_shash_sha256_alg;
 
 extern struct nx_crypto_driver nx_driver;
 
-#define SCATTERWALK_TO_SG	1
-#define SCATTERWALK_FROM_SG	0
-
 #endif
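
The nx_walk_and_build() rework above moves to the newer scatterwalk iteration helpers: scatterwalk_start_at_pos() replaces the hand-rolled fast-forward loop, and each pass pairs scatterwalk_next(), which maps a chunk and exposes it as walk.addr, with scatterwalk_done_src(), which unmaps and advances, so the driver-local SCATTERWALK_TO_SG/SCATTERWALK_FROM_SG flags dropped from nx.h just above become unnecessary. A minimal sketch of the same read-side loop, assuming only the helpers visible in this hunk; example_walk_src() and process_chunk() are hypothetical::

    #include <crypto/scatterwalk.h>

    /* Walk `len` bytes of `sg` starting at byte offset `start`, feeding
     * each mapped chunk to a (hypothetical) consumer.  Mirrors the loop
     * shape used in nx_walk_and_build() after this patch.
     */
    static void example_walk_src(struct scatterlist *sg, unsigned int start,
                                 unsigned int len,
                                 void (*process_chunk)(const void *p,
                                                       unsigned int n))
    {
            struct scatter_walk walk;
            unsigned int n;

            scatterwalk_start_at_pos(&walk, sg, start);
            while (len) {
                    n = scatterwalk_next(&walk, len);  /* maps walk.addr */
                    process_chunk(walk.addr, n);
                    scatterwalk_done_src(&walk, n);    /* unmap + advance */
                    len -= n;
            }
    }

diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 6865c7f1fc1a..db9e84c0c9fb 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -125,7 +125,7 @@ out:
 
 static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
 {
-	u8 buf[4];
+	const u8 *buf = (void *)desc;
 
 	return padlock_sha1_finup(desc, buf, 0, out);
 }
@@ -186,7 +186,7 @@ out:
 
 static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
 {
-	u8 buf[4];
+	const u8 *buf = (void *)desc;
 
 	return padlock_sha256_finup(desc, buf, 0, out);
 }
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 57ab237e899e..b4c3c14dafd5 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -458,19 +458,6 @@ static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
 	*sg = NULL;
 }
 
-static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
-			    unsigned int nbytes, int out)
-{
-	struct scatter_walk walk;
-
-	if (!nbytes)
-		return;
-
-	scatterwalk_start(&walk, sg);
-	scatterwalk_copychunks(buf, &walk, nbytes, out);
-	scatterwalk_done(&walk, out, 0);
-}
-
 static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
 	struct skcipher_request *req = dev->req;
@@ -480,8 +467,8 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
 		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->cryptlen);
-		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
-				dev->req->cryptlen, 1);
+		memcpy_to_sglist(dev->req->dst, 0, sg_virt(dev->sg_dst_cpy),
+				 dev->req->cryptlen);
 	}
 	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
 	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
@@ -526,7 +513,7 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
 		return -ENOMEM;
 	}
 
-	s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);
+	memcpy_from_sglist(pages, src, 0, dev->req->cryptlen);
 
 	sg_init_table(*dst, 1);
 	sg_set_buf(*dst, pages, len);
@@ -1035,8 +1022,7 @@ static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
 	if (ctx->bufcnt)
 		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
 
-	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
-				 new_len, 0);
+	memcpy_from_sglist(buf + ctx->bufcnt, sg, ctx->skip, new_len);
 	sg_init_table(ctx->sgl, 1);
 	sg_set_buf(ctx->sgl, buf, len);
 	ctx->sg = ctx->sgl;
@@ -1229,8 +1215,7 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
 		if (len > nbytes)
 			len = nbytes;
 
-		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
-					 0, len, 0);
+		memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src, 0, len);
 		ctx->bufcnt += len;
 		nbytes -= len;
 		ctx->skip = len;
@@ -1253,9 +1238,8 @@ static int s5p_hash_prepare_request(struct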
ahash_request *req, bool update) hash_later = ctx->total - xmit_len; /* copy hash_later bytes from end of req->src */ /* previous bytes are in xmit_buf, so no overwrite */ - scatterwalk_map_and_copy(ctx->buffer, req->src, - req->nbytes - hash_later, - hash_later, 0); + memcpy_from_sglist(ctx->buffer, req->src, + req->nbytes - hash_later, hash_later); } if (xmit_len > BUFLEN) { @@ -1267,8 +1251,8 @@ static int s5p_hash_prepare_request(struct ahash_request *req, bool update) /* have buffered data only */ if (unlikely(!ctx->bufcnt)) { /* first update didn't fill up buffer */ - scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src, - 0, xmit_len, 0); + memcpy_from_sglist(ctx->dd->xmit_buf, req->src, + 0, xmit_len); } sg_init_table(ctx->sgl, 1); @@ -1506,8 +1490,8 @@ static int s5p_hash_update(struct ahash_request *req) return 0; if (ctx->bufcnt + req->nbytes <= BUFLEN) { - scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, - 0, req->nbytes, 0); + memcpy_from_sglist(ctx->buffer + ctx->bufcnt, req->src, + 0, req->nbytes); ctx->bufcnt += req->nbytes; return 0; } diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c index 14c6339c2e43..5ce88e7a8f65 100644 --- a/drivers/crypto/stm32/stm32-cryp.c +++ b/drivers/crypto/stm32/stm32-cryp.c @@ -666,7 +666,7 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) written = min_t(size_t, AES_BLOCK_SIZE - len, alen); - scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); + memcpy_from_scatterwalk((char *)block + len, &cryp->in_walk, written); writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); @@ -993,7 +993,7 @@ static int stm32_cryp_header_dma_start(struct stm32_cryp *cryp) /* Advance scatterwalk to not DMA'ed data */ align_size = ALIGN_DOWN(cryp->header_in, cryp->hw_blocksize); - scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2); + scatterwalk_skip(&cryp->in_walk, align_size); cryp->header_in -= align_size; ret = dma_submit_error(dmaengine_submit(tx_in)); @@ -1056,7 +1056,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp) /* Advance scatterwalk to not DMA'ed data */ align_size = ALIGN_DOWN(cryp->payload_in, cryp->hw_blocksize); - scatterwalk_copychunks(NULL, &cryp->in_walk, align_size, 2); + scatterwalk_skip(&cryp->in_walk, align_size); cryp->payload_in -= align_size; ret = dma_submit_error(dmaengine_submit(tx_in)); @@ -1067,7 +1067,7 @@ static int stm32_cryp_dma_start(struct stm32_cryp *cryp) dma_async_issue_pending(cryp->dma_lch_in); /* Advance scatterwalk to not DMA'ed data */ - scatterwalk_copychunks(NULL, &cryp->out_walk, align_size, 2); + scatterwalk_skip(&cryp->out_walk, align_size); cryp->payload_out -= align_size; ret = dma_submit_error(dmaengine_submit(tx_out)); if (ret < 0) { @@ -1737,9 +1737,9 @@ static int stm32_cryp_prepare_req(struct skcipher_request *req, out_sg = areq->dst; scatterwalk_start(&cryp->in_walk, in_sg); - scatterwalk_start(&cryp->out_walk, out_sg); /* In output, jump after assoc data */ - scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2); + scatterwalk_start_at_pos(&cryp->out_walk, out_sg, + areq->assoclen); ret = stm32_cryp_hw_init(cryp); if (ret) @@ -1873,12 +1873,12 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) /* Get and write tag */ readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); - scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); + memcpy_to_scatterwalk(&cryp->out_walk, out_tag, cryp->authsize); } else { /* Get and check tag */ u32 
in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; - scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0); + memcpy_from_scatterwalk(in_tag, &cryp->in_walk, cryp->authsize); readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); if (crypto_memneq(in_tag, out_tag, cryp->authsize)) @@ -1923,8 +1923,8 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) u32 block[AES_BLOCK_32]; readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); - scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_out), 1); + memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out)); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); } @@ -1933,8 +1933,8 @@ static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) { u32 block[AES_BLOCK_32] = {0}; - scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_in), 0); + memcpy_from_scatterwalk(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, + cryp->payload_in)); writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32)); cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); } @@ -1981,8 +1981,8 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) */ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); - scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_out), 1); + memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out)); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); @@ -2079,8 +2079,8 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) */ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); - scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, - cryp->payload_out), 1); + memcpy_to_scatterwalk(&cryp->out_walk, block, min_t(size_t, cryp->hw_blocksize, + cryp->payload_out)); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); /* d) Load again CRYP_CSGCMCCMxR */ @@ -2161,7 +2161,7 @@ static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in); - scatterwalk_copychunks(block, &cryp->in_walk, written, 0); + memcpy_from_scatterwalk(block, &cryp->in_walk, written); writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); diff --git a/drivers/crypto/tegra/tegra-se-aes.c b/drivers/crypto/tegra/tegra-se-aes.c index d734c9a56786..ca9d0cca1f74 100644 --- a/drivers/crypto/tegra/tegra-se-aes.c +++ b/drivers/crypto/tegra/tegra-se-aes.c @@ -28,6 +28,9 @@ struct tegra_aes_ctx { u32 ivsize; u32 key1_id; u32 key2_id; + u32 keylen; + u8 key1[AES_MAX_KEY_SIZE]; + u8 key2[AES_MAX_KEY_SIZE]; }; struct tegra_aes_reqctx { @@ -43,8 +46,9 @@ struct tegra_aead_ctx { struct tegra_se *se; unsigned int authsize; u32 alg; - u32 keylen; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; }; struct tegra_aead_reqctx { @@ -56,8 +60,8 @@ struct tegra_aead_reqctx { unsigned int cryptlen; unsigned int authsize; bool encrypt; - u32 config; u32 crypto_config; + u32 config; u32 key_id; u32 iv[4]; u8 authdata[16]; @@ -67,6 +71,8 @@ struct tegra_cmac_ctx { struct tegra_se *se; unsigned int alg; u32 key_id; + u32 keylen; + u8 key[AES_MAX_KEY_SIZE]; struct crypto_shash *fallback_tfm; }; @@ -260,17 +266,13 @@ static int 
tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req); struct tegra_se *se = ctx->se; - unsigned int cmdlen; + unsigned int cmdlen, key1_id, key2_id; int ret; - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - return -ENOMEM; - - rctx->datbuf.size = SE_AES_BUFLEN; rctx->iv = (u32 *)req->iv; rctx->len = req->cryptlen; + key1_id = ctx->key1_id; + key2_id = ctx->key2_id; /* Pad input to AES Block size */ if (ctx->alg != SE_ALG_XTS) { @@ -278,20 +280,59 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq) rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE); } + rctx->datbuf.size = rctx->len; + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } + scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0); + rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt); + rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt); + + if (!key1_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1, + ctx->keylen, ctx->alg, &key1_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id); + + if (ctx->alg == SE_ALG_XTS) { + if (!key2_id) { + ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2, + ctx->keylen, ctx->alg, &key2_id); + if (ret) + goto out; + } + + rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id); + } + /* Prepare the command and submit for execution */ cmdlen = tegra_aes_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); /* Copy the result */ tegra_aes_update_iv(req, ctx); scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1); +out: /* Free the buffer */ - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, rctx->datbuf.buf, rctx->datbuf.addr); + if (tegra_key_is_reserved(key1_id)) + tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg); + + if (tegra_key_is_reserved(key2_id)) + tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg); + +out_finalize: crypto_finalize_skcipher_request(se->engine, req, ret); return 0; @@ -313,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm) ctx->se = se_alg->se_dev; ctx->key1_id = 0; ctx->key2_id = 0; + ctx->keylen = 0; algname = crypto_tfm_alg_name(&tfm->base); ret = se_algname_to_algid(algname); @@ -341,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, u32 keylen) { struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key1, key, keylen); + } + + return 0; } static int tegra_xts_setkey(struct crypto_skcipher *tfm, @@ -365,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm, ret = tegra_key_submit(ctx->se, key, len, ctx->alg, &ctx->key1_id); - if (ret) - return ret; + if (ret) { + ctx->keylen = len; + memcpy(ctx->key1, key, len); + } - return tegra_key_submit(ctx->se, key + len, len, + ret = 
tegra_key_submit(ctx->se, key + len, len, ctx->alg, &ctx->key2_id); + if (ret) { + ctx->keylen = len; + memcpy(ctx->key2, key + len, len); + } return 0; } @@ -443,13 +498,10 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt) if (!req->cryptlen) return 0; - rctx->encrypt = encrypt; - rctx->config = tegra234_aes_cfg(ctx->alg, encrypt); - rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt); - rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id); + if (ctx->alg == SE_ALG_ECB) + req->iv = NULL; - if (ctx->key2_id) - rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id); + rctx->encrypt = encrypt; return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req); } @@ -715,11 +767,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); cmdlen = tegra_gmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx) @@ -732,11 +784,11 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -755,11 +807,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -886,12 +938,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Prepare command and submit */ cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx); - return tegra_se_host1x_submit(se, cmdlen); + return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); } static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) @@ -1073,7 +1125,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt); rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) | - SE_AES_KEY_INDEX(ctx->key_id); + SE_AES_KEY_INDEX(rctx->key_id); /* Copy authdata in the top of buffer for encryption/decryption */ if (rctx->encrypt) @@ -1098,7 +1150,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx /* Prepare command and submit */ cmdlen = tegra_ctr_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) return ret; @@ -1117,6 
+1169,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se, rctx->assoclen = req->assoclen; rctx->authsize = crypto_aead_authsize(tfm); + if (rctx->encrypt) + rctx->cryptlen = req->cryptlen; + else + rctx->cryptlen = req->cryptlen - rctx->authsize; + memcpy(iv, req->iv, 16); ret = tegra_ccm_check_iv(iv); @@ -1145,30 +1202,35 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret; + ret = tegra_ccm_crypt_init(req, se, rctx); + if (ret) + goto out_finalize; + + rctx->key_id = ctx->key_id; + /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, &rctx->inbuf.addr, GFP_KERNEL); if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; + goto out_finalize; - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, &rctx->outbuf.addr, GFP_KERNEL); if (!rctx->outbuf.buf) { ret = -ENOMEM; - goto outbuf_err; + goto out_free_inbuf; } - rctx->outbuf.size = SE_AES_BUFLEN; - - ret = tegra_ccm_crypt_init(req, se, rctx); - if (ret) - goto out; + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } if (rctx->encrypt) { - rctx->cryptlen = req->cryptlen; - /* CBC MAC Operation */ ret = tegra_ccm_compute_auth(ctx, rctx); if (ret) @@ -1179,8 +1241,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) if (ret) goto out; } else { - rctx->cryptlen = req->cryptlen - ctx->authsize; - /* CTR operation */ ret = tegra_ccm_do_ctr(ctx, rctx); if (ret) @@ -1193,13 +1253,17 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq) } out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); -outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, +out_free_inbuf: + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); return 0; @@ -1213,23 +1277,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_aead_reqctx *rctx = aead_request_ctx(req); int ret; - /* Allocate buffers required */ - rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->inbuf.addr, GFP_KERNEL); - if (!rctx->inbuf.buf) - return -ENOMEM; - - rctx->inbuf.size = SE_AES_BUFLEN; - - rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN, - &rctx->outbuf.addr, GFP_KERNEL); - if (!rctx->outbuf.buf) { - ret = -ENOMEM; - goto outbuf_err; - } - - rctx->outbuf.size = SE_AES_BUFLEN; - rctx->src_sg = req->src; rctx->dst_sg = req->dst; rctx->assoclen = req->assoclen; @@ -1243,6 +1290,32 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); rctx->iv[3] = (1 << 24); + rctx->key_id = ctx->key_id; + + /* Allocate buffers required */ + rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->inbuf.buf = 
dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size, + &rctx->inbuf.addr, GFP_KERNEL); + if (!rctx->inbuf.buf) { + ret = -ENOMEM; + goto out_finalize; + } + + rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen; + rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size, + &rctx->outbuf.addr, GFP_KERNEL); + if (!rctx->outbuf.buf) { + ret = -ENOMEM; + goto out_free_inbuf; + } + + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } + /* If there is associated data perform GMAC operation */ if (rctx->assoclen) { ret = tegra_gcm_do_gmac(ctx, rctx); @@ -1266,14 +1339,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq) ret = tegra_gcm_do_verify(ctx->se, rctx); out: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, + dma_free_coherent(ctx->se->dev, rctx->outbuf.size, rctx->outbuf.buf, rctx->outbuf.addr); -outbuf_err: - dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN, +out_free_inbuf: + dma_free_coherent(ctx->se->dev, rctx->inbuf.size, rctx->inbuf.buf, rctx->inbuf.addr); - /* Finalize the request if there are no errors */ + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); + +out_finalize: crypto_finalize_aead_request(ctx->se->engine, req, ret); return 0; @@ -1295,6 +1371,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0; ret = se_algname_to_algid(algname); if (ret < 0) { @@ -1376,13 +1453,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm, const u8 *key, u32 keylen) { struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); return -EINVAL; } - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; } static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx, @@ -1456,6 +1540,35 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct se->base + se->hw->regs->result + (i * 4)); } +static int tegra_cmac_do_init(struct ahash_request *req) +{ + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; + int i; + + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task |= SHA_FIRST; + rctx->blk_size = crypto_ahash_blocksize(tfm); + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + return -ENOMEM; + + rctx->residue.size = 0; + + /* Clear any previous result */ + for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) + writel(0, se->base + se->hw->regs->result + (i * 4)); + + return 0; +} + static int tegra_cmac_do_update(struct ahash_request *req) { struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); @@ -1483,7 +1596,7 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; rctx->total_len += rctx->datbuf.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); - rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id); + rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id); /* * Keep 
one block and residue bytes in residue and @@ -1497,6 +1610,11 @@ static int tegra_cmac_do_update(struct ahash_request *req) return 0; } + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -1511,23 +1629,19 @@ static int tegra_cmac_do_update(struct ahash_request *req) rctx->residue.size = nresidue; /* - * If this is not the first 'update' call, paste the previous copied + * If this is not the first task, paste the previous copied * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. */ if (!(rctx->task & SHA_FIRST)) tegra_cmac_paste_result(ctx->se, rctx); cmdlen = tegra_cmac_prep_cmd(ctx, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); - ret = tegra_se_host1x_submit(se, cmdlen); - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FINAL)) - tegra_cmac_copy_result(ctx->se, rctx); + tegra_cmac_copy_result(ctx->se, rctx); + + dma_free_coherent(ctx->se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); return ret; } @@ -1543,17 +1657,34 @@ static int tegra_cmac_do_final(struct ahash_request *req) if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) { return crypto_shash_tfm_digest(ctx->fallback_tfm, - rctx->datbuf.buf, 0, req->result); + NULL, 0, req->result); + } + + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); } - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0); + /* + * If this is not the first task, paste the previous copied + * intermediate results to the registers so that it gets picked up. 
+ */ + if (!(rctx->task & SHA_FIRST)) + tegra_cmac_paste_result(ctx->se, rctx); + /* Prepare command and submit */ cmdlen = tegra_cmac_prep_cmd(ctx, rctx); - ret = tegra_se_host1x_submit(se, cmdlen); + ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen); if (ret) goto out; @@ -1565,8 +1696,10 @@ static int tegra_cmac_do_final(struct ahash_request *req) writel(0, se->base + se->hw->regs->result + (i * 4)); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2, rctx->residue.buf, rctx->residue.addr); return ret; @@ -1579,17 +1712,41 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_se *se = ctx->se; - int ret; + int ret = 0; + + if (rctx->task & SHA_INIT) { + ret = tegra_cmac_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + + if (!ctx->key_id) { + ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key, + ctx->keylen, ctx->alg, &rctx->key_id); + if (ret) + goto out; + } if (rctx->task & SHA_UPDATE) { ret = tegra_cmac_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_cmac_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } +out: + if (tegra_key_is_reserved(rctx->key_id)) + tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg); crypto_finalize_hash_request(se->engine, req, ret); @@ -1631,6 +1788,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm) ctx->se = se_alg->se_dev; ctx->key_id = 0; + ctx->keylen = 0; ret = se_algname_to_algid(algname); if (ret < 0) { @@ -1655,51 +1813,11 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } -static int tegra_cmac_init(struct ahash_request *req) -{ - struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - int i; - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->task = SHA_FIRST; - rctx->blk_size = crypto_ahash_blocksize(tfm); - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - goto resbuf_fail; - - rctx->residue.size = 0; - - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - rctx->datbuf.size = 0; - - /* Clear any previous result */ - for (i = 0; i < CMAC_RESULT_REG_COUNT; i++) - writel(0, se->base + se->hw->regs->result + (i * 4)); - - return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - return -ENOMEM; -} - static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) { dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen); @@ -1709,7 +1827,24 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, if (ctx->fallback_tfm) crypto_shash_setkey(ctx->fallback_tfm, key, keylen); - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + ret 
= tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) { + ctx->keylen = keylen; + memcpy(ctx->key, key, keylen); + } + + return 0; +} + +static int tegra_cmac_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } static int tegra_cmac_update(struct ahash_request *req) @@ -1750,13 +1885,9 @@ static int tegra_cmac_digest(struct ahash_request *req) struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm); struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req); - int ret; - ret = tegra_cmac_init(req); - if (ret) - return ret; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL; - rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c index 0b5cdd5676b1..42d007b7af45 100644 --- a/drivers/crypto/tegra/tegra-se-hash.c +++ b/drivers/crypto/tegra/tegra-se-hash.c @@ -34,6 +34,7 @@ struct tegra_sha_reqctx { struct tegra_se_datbuf datbuf; struct tegra_se_datbuf residue; struct tegra_se_datbuf digest; + struct tegra_se_datbuf intr_res; unsigned int alg; unsigned int config; unsigned int total_len; @@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out) return crypto_ahash_export(&rctx->fallback_req, out); } -static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, +static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, + struct tegra_sha_reqctx *rctx) +{ + __be32 *res_be = (__be32 *)rctx->intr_res.buf; + u32 *res = (u32 *)rctx->intr_res.buf; + int i = 0, j; + + cpuvaddr[i++] = 0; + cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT); + + for (j = 0; j < HASH_RESULT_REG_COUNT; j++) { + int idx = j; + + /* + * The initial, intermediate and final hash value of SHA-384, SHA-512 + * in SHA_HASH_RESULT registers follow the below layout of bytes. + * + * +---------------+------------+ + * | HASH_RESULT_0 | B4...B7 | + * +---------------+------------+ + * | HASH_RESULT_1 | B0...B3 | + * +---------------+------------+ + * | HASH_RESULT_2 | B12...B15 | + * +---------------+------------+ + * | HASH_RESULT_3 | B8...B11 | + * +---------------+------------+ + * | ...... | + * +---------------+------------+ + * | HASH_RESULT_14| B60...B63 | + * +---------------+------------+ + * | HASH_RESULT_15| B56...B59 | + * +---------------+------------+ + * + */ + if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512) + idx = (j % 2) ? j - 1 : j + 1; + + /* For SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 the initial + * intermediate and final hash value when stored in + * SHA_HASH_RESULT registers, the byte order is NOT in + * little-endian. 
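
The index swap above implements the layout shown in the comment: for SHA-384 and SHA-512, each 64-bit word of the state occupies two 32-bit result registers in swapped order, so register j corresponds to buffer word j ^ 1 (the byte-order conversion that follows handles the big-endian storage). A hypothetical standalone helper showing the same mapping::

    /* Sketch only: mirrors the idx computation in the driver. */
    static u32 result_word(const u32 *res, int j, bool is_sha512_family)
    {
        int idx = j;

        if (is_sha512_family)
            idx = (j % 2) ? j - 1 : j + 1;   /* equivalently: j ^ 1 */

        return res[idx];
    }
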
+ */ + if (ctx->alg <= SE_ALG_SHA512) + cpuvaddr[i++] = be32_to_cpu(res_be[idx]); + else + cpuvaddr[i++] = res[idx]; + } + + return i; +} + +static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr, struct tegra_sha_reqctx *rctx) { + struct tegra_se *se = ctx->se; u64 msg_len, msg_left; int i = 0; @@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = upper_32_bits(msg_left); cpuvaddr[i++] = 0; cpuvaddr[i++] = 0; - cpuvaddr[i++] = host1x_opcode_setpayload(6); + cpuvaddr[i++] = host1x_opcode_setpayload(2); cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG); cpuvaddr[i++] = rctx->config; @@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT; rctx->task &= ~SHA_FIRST; } else { - cpuvaddr[i++] = 0; + /* + * If it isn't the first task, program the HASH_RESULT register + * with the intermediate result from the previous task + */ + i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx); } + cpuvaddr[i++] = host1x_opcode_setpayload(4); + cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR); cpuvaddr[i++] = rctx->datbuf.addr; cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) | SE_ADDR_HI_SZ(rctx->datbuf.size)); - cpuvaddr[i++] = rctx->digest.addr; - cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | - SE_ADDR_HI_SZ(rctx->digest.size)); + + if (rctx->task & SHA_UPDATE) { + cpuvaddr[i++] = rctx->intr_res.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) | + SE_ADDR_HI_SZ(rctx->intr_res.size)); + } else { + cpuvaddr[i++] = rctx->digest.addr; + cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) | + SE_ADDR_HI_SZ(rctx->digest.size)); + } + if (rctx->key_id) { cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG); @@ -266,42 +334,72 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr, cpuvaddr[i++] = host1x_opcode_setpayload(1); cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION); - cpuvaddr[i++] = SE_SHA_OP_WRSTALL | - SE_SHA_OP_START | + cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START | SE_SHA_OP_LASTBUF; cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1); cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) | host1x_uclass_incr_syncpt_indx_f(se->syncpt_id); - dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x", - msg_len, msg_left, rctx->config); + dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x", + msg_len, msg_left, rctx->datbuf.size, rctx->config); return i; } -static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) +static int tegra_sha_do_init(struct ahash_request *req) { - int i; + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + struct tegra_se *se = ctx->se; - for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4)); -} + if (ctx->fallback) + return tegra_sha_fallback_init(req); -static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx) -{ - int i; + rctx->total_len = 0; + rctx->datbuf.size = 0; + rctx->residue.size = 0; + rctx->key_id = ctx->key_id; + rctx->task |= SHA_FIRST; + rctx->alg = ctx->alg; + rctx->blk_size = crypto_ahash_blocksize(tfm); + rctx->digest.size = crypto_ahash_digestsize(tfm); + + 
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, + &rctx->digest.addr, GFP_KERNEL); + if (!rctx->digest.buf) + goto digbuf_fail; + + rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, + &rctx->residue.addr, GFP_KERNEL); + if (!rctx->residue.buf) + goto resbuf_fail; + + rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4; + rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size, + &rctx->intr_res.addr, GFP_KERNEL); + if (!rctx->intr_res.buf) + goto intr_res_fail; + + return 0; - for (i = 0; i < HASH_RESULT_REG_COUNT; i++) - writel(rctx->result[i], - se->base + se->hw->regs->result + (i * 4)); +intr_res_fail: + dma_free_coherent(se->dev, rctx->residue.size, rctx->residue.buf, + rctx->residue.addr); +resbuf_fail: + dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, + rctx->digest.addr); +digbuf_fail: + return -ENOMEM; } static int tegra_sha_do_update(struct ahash_request *req) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct tegra_se *se = ctx->se; unsigned int nblks, nresidue, size, ret; - u32 *cpuvaddr = ctx->se->cmdbuf->addr; + u32 *cpuvaddr = se->cmdbuf->addr; nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size; nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size; @@ -317,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req) rctx->src_sg = req->src; rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue; - rctx->total_len += rctx->datbuf.size; /* * If nbytes are less than a block size, copy it residue and @@ -326,11 +423,16 @@ static int tegra_sha_do_update(struct ahash_request *req) if (nblks < 1) { scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size, rctx->src_sg, 0, req->nbytes, 0); - rctx->residue.size += req->nbytes; + return 0; } + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) + return -ENOMEM; + /* Copy the previous residue first */ if (rctx->residue.size) memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); @@ -343,29 +445,16 @@ static int tegra_sha_do_update(struct ahash_request *req) /* Update residue value with the residue after current block */ rctx->residue.size = nresidue; + rctx->total_len += rctx->datbuf.size; rctx->config = tegra_sha_get_config(rctx->alg) | - SE_SHA_DST_HASH_REG; - - /* - * If this is not the first 'update' call, paste the previous copied - * intermediate results to the registers so that it gets picked up. - * This is to support the import/export functionality. - */ - if (!(rctx->task & SHA_FIRST)) - tegra_sha_paste_hash_result(ctx->se, rctx); + SE_SHA_DST_MEMORY; - size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx); + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); - ret = tegra_se_host1x_submit(ctx->se, size); - - /* - * If this is not the final update, copy the intermediate results - * from the registers so that it can be used in the next 'update' - * call. This is to support the import/export functionality. 
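
tegra_sha_do_init() above uses the usual reverse-order unwind: each failure label frees only the buffers that were successfully allocated before the failing call. The generic shape of the idiom, with placeholder names and sizes::

    a = dma_alloc_coherent(dev, A_SIZE, &a_dma, GFP_KERNEL);
    if (!a)
        goto err;

    b = dma_alloc_coherent(dev, B_SIZE, &b_dma, GFP_KERNEL);
    if (!b)
        goto err_free_a;

    c = dma_alloc_coherent(dev, C_SIZE, &c_dma, GFP_KERNEL);
    if (!c)
        goto err_free_b;

    return 0;

    err_free_b:
        dma_free_coherent(dev, B_SIZE, b, b_dma);
    err_free_a:
        dma_free_coherent(dev, A_SIZE, a, a_dma);
    err:
        return -ENOMEM;
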
- */ - if (!(rctx->task & SHA_FINAL)) - tegra_sha_copy_hash_result(ctx->se, rctx); + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); return ret; } @@ -379,16 +468,25 @@ static int tegra_sha_do_final(struct ahash_request *req) u32 *cpuvaddr = se->cmdbuf->addr; int size, ret = 0; - memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + if (rctx->residue.size) { + rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size, + &rctx->datbuf.addr, GFP_KERNEL); + if (!rctx->datbuf.buf) { + ret = -ENOMEM; + goto out_free; + } + + memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size); + } + rctx->datbuf.size = rctx->residue.size; rctx->total_len += rctx->residue.size; rctx->config = tegra_sha_get_config(rctx->alg) | SE_SHA_DST_MEMORY; - size = tegra_sha_prep_cmd(se, cpuvaddr, rctx); - - ret = tegra_se_host1x_submit(se, size); + size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx); + ret = tegra_se_host1x_submit(se, se->cmdbuf, size); if (ret) goto out; @@ -396,12 +494,18 @@ static int tegra_sha_do_final(struct ahash_request *req) memcpy(req->result, rctx->digest.buf, rctx->digest.size); out: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, - rctx->datbuf.buf, rctx->datbuf.addr); + if (rctx->residue.size) + dma_free_coherent(se->dev, rctx->datbuf.size, + rctx->datbuf.buf, rctx->datbuf.addr); +out_free: dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm), rctx->residue.buf, rctx->residue.addr); dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf, rctx->digest.addr); + + dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf, + rctx->intr_res.addr); + return ret; } @@ -414,16 +518,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq) struct tegra_se *se = ctx->se; int ret = 0; + if (rctx->task & SHA_INIT) { + ret = tegra_sha_do_init(req); + if (ret) + goto out; + + rctx->task &= ~SHA_INIT; + } + if (rctx->task & SHA_UPDATE) { ret = tegra_sha_do_update(req); + if (ret) + goto out; + rctx->task &= ~SHA_UPDATE; } if (rctx->task & SHA_FINAL) { ret = tegra_sha_do_final(req); + if (ret) + goto out; + rctx->task &= ~SHA_FINAL; } +out: crypto_finalize_hash_request(se->engine, req, ret); return 0; @@ -497,52 +616,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm) tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg); } -static int tegra_sha_init(struct ahash_request *req) -{ - struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); - struct tegra_se *se = ctx->se; - - if (ctx->fallback) - return tegra_sha_fallback_init(req); - - rctx->total_len = 0; - rctx->datbuf.size = 0; - rctx->residue.size = 0; - rctx->key_id = ctx->key_id; - rctx->task = SHA_FIRST; - rctx->alg = ctx->alg; - rctx->blk_size = crypto_ahash_blocksize(tfm); - rctx->digest.size = crypto_ahash_digestsize(tfm); - - rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size, - &rctx->digest.addr, GFP_KERNEL); - if (!rctx->digest.buf) - goto digbuf_fail; - - rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size, - &rctx->residue.addr, GFP_KERNEL); - if (!rctx->residue.buf) - goto resbuf_fail; - - rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN, - &rctx->datbuf.addr, GFP_KERNEL); - if (!rctx->datbuf.buf) - goto datbuf_fail; - - return 0; - -datbuf_fail: - dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf, - rctx->residue.addr); -resbuf_fail: - dma_free_coherent(se->dev, SE_SHA_BUFLEN, 
rctx->datbuf.buf, - rctx->datbuf.addr); -digbuf_fail: - return -ENOMEM; -} - static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key, unsigned int keylen) { @@ -559,13 +632,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + int ret; if (aes_check_keylen(keylen)) return tegra_hmac_fallback_setkey(ctx, key, keylen); + ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + if (ret) + return tegra_hmac_fallback_setkey(ctx, key, keylen); + ctx->fallback = false; - return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id); + return 0; +} + +static int tegra_sha_init(struct ahash_request *req) +{ + struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); + + rctx->task = SHA_INIT; + + return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } static int tegra_sha_update(struct ahash_request *req) @@ -615,16 +704,12 @@ static int tegra_sha_digest(struct ahash_request *req) struct tegra_sha_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm); - int ret; if (ctx->fallback) return tegra_sha_fallback_digest(req); - ret = tegra_sha_init(req); - if (ret) - return ret; + rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL; - rctx->task |= SHA_UPDATE | SHA_FINAL; return crypto_transfer_hash_request_to_engine(ctx->se->engine, req); } diff --git a/drivers/crypto/tegra/tegra-se-key.c b/drivers/crypto/tegra/tegra-se-key.c index ac14678dbd30..956fa9b4e9b1 100644 --- a/drivers/crypto/tegra/tegra-se-key.c +++ b/drivers/crypto/tegra/tegra-se-key.c @@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key, u32 keylen, u16 slot, u32 alg) { const u32 *keyval = (u32 *)key; - u32 *addr = se->cmdbuf->addr, size; + u32 *addr = se->keybuf->addr, size; + int ret; + + mutex_lock(&kslt_lock); size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg); + ret = tegra_se_host1x_submit(se, se->keybuf, size); + + mutex_unlock(&kslt_lock); - return tegra_se_host1x_submit(se, size); + return ret; } void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) @@ -135,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg) tegra_keyslot_free(keyid); } +void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg) +{ + u8 zkey[AES_MAX_KEY_SIZE] = {0}; + + if (!keyid) + return; + + /* Overwrite the key with 0s */ + tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg); +} + +inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + return tegra_key_insert(se, key, keylen, *keyid, alg); +} + int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid) { int ret; @@ -143,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3 if (!tegra_key_in_kslt(*keyid)) { *keyid = tegra_keyslot_alloc(); if (!(*keyid)) { - dev_err(se->dev, "failed to allocate key slot\n"); + dev_dbg(se->dev, "failed to allocate key slot\n"); return -ENOMEM; } } diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c index 918c0b10614d..1c94f1de0546 100644 --- a/drivers/crypto/tegra/tegra-se-main.c +++ b/drivers/crypto/tegra/tegra-se-main.c @@ -141,7 +141,7 @@ static struct 
tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi return cmdbuf; } -int tegra_se_host1x_submit(struct tegra_se *se, u32 size) +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size) { struct host1x_job *job; int ret; @@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size) job->engine_fallback_streamid = se->stream_id; job->engine_streamid_offset = SE_STREAM_ID; - se->cmdbuf->words = size; + cmdbuf->words = size; - host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0); + host1x_job_add_gather(job, &cmdbuf->bo, size, 0); ret = host1x_job_pin(job, se->dev); if (ret) { @@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client) goto syncpt_put; } + se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K); + if (!se->keybuf) { + ret = -ENOMEM; + goto cmdbuf_put; + } + ret = se->hw->init_alg(se); if (ret) { dev_err(se->dev, "failed to register algorithms\n"); - goto cmdbuf_put; + goto keybuf_put; } return 0; +keybuf_put: + tegra_se_cmdbuf_put(&se->keybuf->bo); cmdbuf_put: tegra_se_cmdbuf_put(&se->cmdbuf->bo); syncpt_put: diff --git a/drivers/crypto/tegra/tegra-se.h b/drivers/crypto/tegra/tegra-se.h index b9dd7ceb8783..b6cac9384f66 100644 --- a/drivers/crypto/tegra/tegra-se.h +++ b/drivers/crypto/tegra/tegra-se.h @@ -24,6 +24,7 @@ #define SE_STREAM_ID 0x90 #define SE_SHA_CFG 0x4004 +#define SE_SHA_IN_ADDR 0x400c #define SE_SHA_KEY_ADDR 0x4094 #define SE_SHA_KEY_DATA 0x4098 #define SE_SHA_KEYMANIFEST 0x409c @@ -340,12 +341,14 @@ #define SE_CRYPTO_CTR_REG_COUNT 4 #define SE_MAX_KEYSLOT 15 #define SE_MAX_MEM_ALLOC SZ_4M -#define SE_AES_BUFLEN 0x8000 -#define SE_SHA_BUFLEN 0x2000 + +#define TEGRA_AES_RESERVED_KSLT 14 +#define TEGRA_XTS_RESERVED_KSLT 15 #define SHA_FIRST BIT(0) -#define SHA_UPDATE BIT(1) -#define SHA_FINAL BIT(2) +#define SHA_INIT BIT(1) +#define SHA_UPDATE BIT(2) +#define SHA_FINAL BIT(3) /* Security Engine operation modes */ enum se_aes_alg { @@ -420,6 +423,7 @@ struct tegra_se { struct host1x_client client; struct host1x_channel *channel; struct tegra_se_cmdbuf *cmdbuf; + struct tegra_se_cmdbuf *keybuf; struct crypto_engine *engine; struct host1x_syncpt *syncpt; struct device *dev; @@ -501,8 +505,33 @@ void tegra_deinit_aes(struct tegra_se *se); void tegra_deinit_hash(struct tegra_se *se); int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid); + +int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid); + void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg); -int tegra_se_host1x_submit(struct tegra_se *se, u32 size); +void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg); +int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size); + +static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_AES_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key, + u32 keylen, u32 alg, u32 *keyid) +{ + *keyid = TEGRA_XTS_RESERVED_KSLT; + return tegra_key_submit_reserved(se, key, keylen, alg, keyid); +} + +static inline bool tegra_key_is_reserved(u32 keyid) +{ + return ((keyid == TEGRA_AES_RESERVED_KSLT) || + (keyid == TEGRA_XTS_RESERVED_KSLT)); +} /* HOST1x OPCODES */ static inline u32 host1x_opcode_setpayload(unsigned int payload) diff --git 
a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c index 48fee07b7e51..2e44915c9f23 100644 --- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c @@ -21,12 +21,11 @@ #include "virtio_crypto_common.h" struct virtio_crypto_rsa_ctx { - MPI n; + unsigned int key_size; }; struct virtio_crypto_akcipher_ctx { struct virtio_crypto *vcrypto; - struct crypto_akcipher *tfm; bool session_valid; __u64 session_id; union { @@ -36,8 +35,6 @@ struct virtio_crypto_akcipher_ctx { struct virtio_crypto_akcipher_request { struct virtio_crypto_request base; - struct virtio_crypto_akcipher_ctx *akcipher_ctx; - struct akcipher_request *akcipher_req; void *src_buf; void *dst_buf; uint32_t opcode; @@ -69,7 +66,9 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request * { struct virtio_crypto_akcipher_request *vc_akcipher_req = container_of(vc_req, struct virtio_crypto_akcipher_request, base); - struct akcipher_request *akcipher_req; + struct akcipher_request *akcipher_req = + container_of((void *)vc_akcipher_req, struct akcipher_request, + __ctx); int error; switch (vc_req->status) { @@ -88,8 +87,7 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request * break; } - akcipher_req = vc_akcipher_req->akcipher_req; - /* actual length maybe less than dst buffer */ + /* actual length may be less than dst buffer */ akcipher_req->dst_len = len - sizeof(vc_req->status); sg_copy_from_buffer(akcipher_req->dst, sg_nents(akcipher_req->dst), vc_akcipher_req->dst_buf, akcipher_req->dst_len); @@ -213,7 +211,8 @@ out: static int __virtio_crypto_akcipher_do_req(struct virtio_crypto_akcipher_request *vc_akcipher_req, struct akcipher_request *req, struct data_queue *data_vq) { - struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx; + struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req); + struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm); struct virtio_crypto_request *vc_req = &vc_akcipher_req->base; struct virtio_crypto *vcrypto = ctx->vcrypto; struct virtio_crypto_op_data_req *req_data = vc_req->req_data; @@ -273,7 +272,8 @@ static int virtio_crypto_rsa_do_req(struct crypto_engine *engine, void *vreq) struct akcipher_request *req = container_of(vreq, struct akcipher_request, base); struct virtio_crypto_akcipher_request *vc_akcipher_req = akcipher_request_ctx(req); struct virtio_crypto_request *vc_req = &vc_akcipher_req->base; - struct virtio_crypto_akcipher_ctx *ctx = vc_akcipher_req->akcipher_ctx; + struct crypto_akcipher *atfm = crypto_akcipher_reqtfm(req); + struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(atfm); struct virtio_crypto *vcrypto = ctx->vcrypto; struct data_queue *data_vq = vc_req->dataq; struct virtio_crypto_op_header *header; @@ -319,8 +319,6 @@ static int virtio_crypto_rsa_req(struct akcipher_request *req, uint32_t opcode) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_akcipher_callback; - vc_akcipher_req->akcipher_ctx = ctx; - vc_akcipher_req->akcipher_req = req; vc_akcipher_req->opcode = opcode; return crypto_transfer_akcipher_request_to_engine(data_vq->engine, req); @@ -352,10 +350,7 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm, int node = virtio_crypto_get_current_node(); uint32_t keytype; int ret; - - /* mpi_free will test n, just free it. 
*/ - mpi_free(rsa_ctx->n); - rsa_ctx->n = NULL; + MPI n; if (private) { keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE; @@ -368,10 +363,13 @@ static int virtio_crypto_rsa_set_key(struct crypto_akcipher *tfm, if (ret) return ret; - rsa_ctx->n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz); - if (!rsa_ctx->n) + n = mpi_read_raw_data(rsa_key.n, rsa_key.n_sz); + if (!n) return -ENOMEM; + rsa_ctx->key_size = mpi_get_size(n); + mpi_free(n); + if (!ctx->vcrypto) { vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_AKCIPHER, VIRTIO_CRYPTO_AKCIPHER_RSA); @@ -442,15 +440,11 @@ static unsigned int virtio_crypto_rsa_max_size(struct crypto_akcipher *tfm) struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm); struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx; - return mpi_get_size(rsa_ctx->n); + return rsa_ctx->key_size; } static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) { - struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm); - - ctx->tfm = tfm; - akcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_akcipher_request)); @@ -460,12 +454,9 @@ static int virtio_crypto_rsa_init_tfm(struct crypto_akcipher *tfm) static void virtio_crypto_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct virtio_crypto_akcipher_ctx *ctx = akcipher_tfm_ctx(tfm); - struct virtio_crypto_rsa_ctx *rsa_ctx = &ctx->rsa_ctx; virtio_crypto_alg_akcipher_close_session(ctx); virtcrypto_dev_put(ctx->vcrypto); - mpi_free(rsa_ctx->n); - rsa_ctx->n = NULL; } static struct virtio_crypto_akcipher_algo virtio_crypto_akcipher_algs[] = { diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index d0278eb568b9..0d522049f595 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -480,10 +480,8 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto) for (i = 0; i < vcrypto->max_data_queues; i++) { vq = vcrypto->data_vq[i].vq; - while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) { - kfree(vc_req->req_data); - kfree(vc_req->sgs); - } + while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) + virtcrypto_clear_request(vc_req); cond_resched(); } } diff --git a/drivers/crypto/virtio/virtio_crypto_mgr.c b/drivers/crypto/virtio/virtio_crypto_mgr.c index 70e778aac0f2..bddbd8ebfebe 100644 --- a/drivers/crypto/virtio/virtio_crypto_mgr.c +++ b/drivers/crypto/virtio/virtio_crypto_mgr.c @@ -256,7 +256,7 @@ int virtcrypto_dev_start(struct virtio_crypto *vcrypto) * @vcrypto: Pointer to virtio crypto device. * * Function notifies all the registered services that the virtio crypto device - * is ready to be used. + * shall no longer be used. * To be used by virtio crypto device specific drivers. * * Return: void diff --git a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c index 23c41d87d835..1b3fb21a2a7d 100644 --- a/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c +++ b/drivers/crypto/virtio/virtio_crypto_skcipher_algs.c @@ -17,7 +17,6 @@ struct virtio_crypto_skcipher_ctx { struct virtio_crypto *vcrypto; - struct crypto_skcipher *tfm; struct virtio_crypto_sym_session_info enc_sess_info; struct virtio_crypto_sym_session_info dec_sess_info; @@ -28,8 +27,6 @@ struct virtio_crypto_sym_request { /* Cipher or aead */ uint32_t type; - struct virtio_crypto_skcipher_ctx *skcipher_ctx; - struct skcipher_request *skcipher_req; uint8_t *iv; /* Encryption? 
*/ bool encrypt; @@ -57,7 +54,9 @@ static void virtio_crypto_dataq_sym_callback { struct virtio_crypto_sym_request *vc_sym_req = container_of(vc_req, struct virtio_crypto_sym_request, base); - struct skcipher_request *ablk_req; + struct skcipher_request *ablk_req = + container_of((void *)vc_sym_req, struct skcipher_request, + __ctx); int error; /* Finish the encrypt or decrypt process */ @@ -77,7 +76,6 @@ static void virtio_crypto_dataq_sym_callback error = -EIO; break; } - ablk_req = vc_sym_req->skcipher_req; virtio_crypto_skcipher_finalize_req(vc_sym_req, ablk_req, error); } @@ -325,7 +323,7 @@ __virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, struct data_queue *data_vq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx; + struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct virtio_crypto_request *vc_req = &vc_sym_req->base; unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct virtio_crypto *vcrypto = ctx->vcrypto; @@ -481,8 +479,6 @@ static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; - vc_sym_req->skcipher_ctx = ctx; - vc_sym_req->skcipher_req = req; vc_sym_req->encrypt = true; return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req); @@ -506,8 +502,6 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req) vc_req->dataq = data_vq; vc_req->alg_cb = virtio_crypto_dataq_sym_callback; - vc_sym_req->skcipher_ctx = ctx; - vc_sym_req->skcipher_req = req; vc_sym_req->encrypt = false; return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req); @@ -515,10 +509,7 @@ static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req) static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm) { - struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); - crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request)); - ctx->tfm = tfm; return 0; } diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c index 0b48cbab8a3d..ea6f06adcd43 100644 --- a/fs/ubifs/compress.c +++ b/fs/ubifs/compress.c @@ -15,7 +15,8 @@ * decompression. 
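
In the UBIFS changes that follow, the per-compressor mutexes disappear because serialization moves into the crypto layer itself: crypto_comp ran every call through one shared tfm whose workspace had to be locked externally, while acomp requests carry per-call state and the algorithm keeps its scratch memory in per-CPU streams (see the crypto_acomp_stream member added to COMP_ALG_COMMON in the acompress.h changes below). The before/after calling convention, sketched::

    /* Old (removed): shared tfm, external lock. */
    mutex_lock(compr->comp_mutex);
    err = crypto_comp_compress(compr->cc, in_buf, in_len,
                               out_buf, &out_len);
    mutex_unlock(compr->comp_mutex);

    /* New: one request per call; no caller-side locking needed. */
    ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN);
    acomp_request_set_src_dma(req, in_buf, in_len);
    acomp_request_set_dst_dma(req, out_buf, out_len);
    err = crypto_acomp_compress(req);   /* completed via crypto_wait */
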
*/ -#include <linux/crypto.h> +#include <crypto/acompress.h> +#include <linux/highmem.h> #include "ubifs.h" /* Fake description object for the "none" compressor */ @@ -26,11 +27,8 @@ static struct ubifs_compressor none_compr = { }; #ifdef CONFIG_UBIFS_FS_LZO -static DEFINE_MUTEX(lzo_mutex); - static struct ubifs_compressor lzo_compr = { .compr_type = UBIFS_COMPR_LZO, - .comp_mutex = &lzo_mutex, .name = "lzo", .capi_name = "lzo", }; @@ -42,13 +40,8 @@ static struct ubifs_compressor lzo_compr = { #endif #ifdef CONFIG_UBIFS_FS_ZLIB -static DEFINE_MUTEX(deflate_mutex); -static DEFINE_MUTEX(inflate_mutex); - static struct ubifs_compressor zlib_compr = { .compr_type = UBIFS_COMPR_ZLIB, - .comp_mutex = &deflate_mutex, - .decomp_mutex = &inflate_mutex, .name = "zlib", .capi_name = "deflate", }; @@ -60,13 +53,8 @@ static struct ubifs_compressor zlib_compr = { #endif #ifdef CONFIG_UBIFS_FS_ZSTD -static DEFINE_MUTEX(zstd_enc_mutex); -static DEFINE_MUTEX(zstd_dec_mutex); - static struct ubifs_compressor zstd_compr = { .compr_type = UBIFS_COMPR_ZSTD, - .comp_mutex = &zstd_enc_mutex, - .decomp_mutex = &zstd_dec_mutex, .name = "zstd", .capi_name = "zstd", }; @@ -80,6 +68,30 @@ static struct ubifs_compressor zstd_compr = { /* All UBIFS compressors */ struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; +static int ubifs_compress_req(const struct ubifs_info *c, + struct acomp_req *req, + void *out_buf, int *out_len, + const char *compr_name) +{ + struct crypto_wait wait; + int in_len = req->slen; + int dlen = *out_len; + int err; + + dlen = min(dlen, in_len - UBIFS_MIN_COMPRESS_DIFF); + + crypto_init_wait(&wait); + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + acomp_request_set_dst_dma(req, out_buf, dlen); + err = crypto_acomp_compress(req); + err = crypto_wait_req(err, &wait); + *out_len = req->dlen; + acomp_request_free(req); + + return err; +} + /** * ubifs_compress - compress data. * @c: UBIFS file-system description object @@ -112,23 +124,14 @@ void ubifs_compress(const struct ubifs_info *c, const void *in_buf, if (in_len < UBIFS_MIN_COMPR_LEN) goto no_compr; - if (compr->comp_mutex) - mutex_lock(compr->comp_mutex); - err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf, - (unsigned int *)out_len); - if (compr->comp_mutex) - mutex_unlock(compr->comp_mutex); - if (unlikely(err)) { - ubifs_warn(c, "cannot compress %d bytes, compressor %s, error %d, leave data uncompressed", - in_len, compr->name, err); - goto no_compr; + { + ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); + + acomp_request_set_src_dma(req, in_buf, in_len); + err = ubifs_compress_req(c, req, out_buf, out_len, compr->name); } - /* - * If the data compressed only slightly, it is better to leave it - * uncompressed to improve read speed. - */ - if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF) + if (err) goto no_compr; return; @@ -140,6 +143,83 @@ no_compr: } /** + * ubifs_compress_folio - compress folio. + * @c: UBIFS file-system description object + * @in_folio: data to compress + * @in_offset: offset into @in_folio + * @in_len: length of the data to compress + * @out_buf: output buffer where compressed data should be stored + * @out_len: output buffer length is returned here + * @compr_type: type of compression to use on enter, actually used compression + * type on exit + * + * This function compresses input folio @in_folio of length @in_len and + * stores the result in the output buffer @out_buf and the resulting length + * in @out_len. 
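
Note the capping line in ubifs_compress_req() above: instead of checking after the fact whether compression saved enough bytes, the destination is sized as in_len - UBIFS_MIN_COMPRESS_DIFF, so an insufficiently effective compression simply fails and ubifs_compress() falls through to its no_compr path, storing the data uncompressed. Illustrative numbers, assumed for the example::

    /* Assumed: a 4096-byte block and UBIFS_MIN_COMPRESS_DIFF of 64. */
    int in_len = 4096;
    int dlen   = 4096;                                  /* buffer capacity */

    dlen = min(dlen, in_len - UBIFS_MIN_COMPRESS_DIFF); /* now 4032 */
    /*
     * Output of 4033..4096 bytes no longer fits: the request fails
     * and the caller keeps the block uncompressed, matching the old
     * "compressed only slightly" check that this replaces.
     */
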
If the input buffer does not compress, it is just copied + * to the @out_buf. The same happens if @compr_type is %UBIFS_COMPR_NONE + * or if compression error occurred. + * + * Note, if the input buffer was not compressed, it is copied to the output + * buffer and %UBIFS_COMPR_NONE is returned in @compr_type. + */ +void ubifs_compress_folio(const struct ubifs_info *c, struct folio *in_folio, + size_t in_offset, int in_len, void *out_buf, + int *out_len, int *compr_type) +{ + int err; + struct ubifs_compressor *compr = ubifs_compressors[*compr_type]; + + if (*compr_type == UBIFS_COMPR_NONE) + goto no_compr; + + /* If the input data is small, do not even try to compress it */ + if (in_len < UBIFS_MIN_COMPR_LEN) + goto no_compr; + + { + ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); + + acomp_request_set_src_folio(req, in_folio, in_offset, in_len); + err = ubifs_compress_req(c, req, out_buf, out_len, compr->name); + } + + if (err) + goto no_compr; + + return; + +no_compr: + memcpy_from_folio(out_buf, in_folio, in_offset, in_len); + *out_len = in_len; + *compr_type = UBIFS_COMPR_NONE; +} + +static int ubifs_decompress_req(const struct ubifs_info *c, + struct acomp_req *req, + const void *in_buf, int in_len, int *out_len, + const char *compr_name) +{ + struct crypto_wait wait; + int err; + + crypto_init_wait(&wait); + acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + crypto_req_done, &wait); + acomp_request_set_src_dma(req, in_buf, in_len); + err = crypto_acomp_decompress(req); + err = crypto_wait_req(err, &wait); + *out_len = req->dlen; + + if (err) + ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d", + in_len, compr_name, err); + + acomp_request_free(req); + + return err; +} + +/** * ubifs_decompress - decompress data. * @c: UBIFS file-system description object * @in_buf: data to decompress @@ -155,7 +235,6 @@ no_compr: int ubifs_decompress(const struct ubifs_info *c, const void *in_buf, int in_len, void *out_buf, int *out_len, int compr_type) { - int err; struct ubifs_compressor *compr; if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { @@ -176,17 +255,62 @@ int ubifs_decompress(const struct ubifs_info *c, const void *in_buf, return 0; } - if (compr->decomp_mutex) - mutex_lock(compr->decomp_mutex); - err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf, - (unsigned int *)out_len); - if (compr->decomp_mutex) - mutex_unlock(compr->decomp_mutex); - if (err) - ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d", - in_len, compr->name, err); + { + ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); - return err; + acomp_request_set_dst_dma(req, out_buf, *out_len); + return ubifs_decompress_req(c, req, in_buf, in_len, out_len, + compr->name); + } +} + +/** + * ubifs_decompress_folio - decompress folio. + * @c: UBIFS file-system description object + * @in_buf: data to decompress + * @in_len: length of the data to decompress + * @out_folio: output folio where decompressed data should + * @out_offset: offset into @out_folio + * @out_len: output length is returned here + * @compr_type: type of compression + * + * This function decompresses data from buffer @in_buf into folio + * @out_folio. The length of the uncompressed data is returned in + * @out_len. This functions returns %0 on success or a negative error + * code on failure. 
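
Both request helpers above drive the asynchronous acomp API synchronously through the crypto_wait machinery, the standard pattern for callers that have no use for async completion::

    /* Run an async (de)compression request to completion. */
    struct crypto_wait wait;
    int err;

    crypto_init_wait(&wait);
    acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                               crypto_req_done, &wait);
    err = crypto_wait_req(crypto_acomp_decompress(req), &wait);
    /*
     * -EINPROGRESS and -EBUSY are absorbed by the wait; err holds
     * the final operation status.
     */
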
+ */ +int ubifs_decompress_folio(const struct ubifs_info *c, const void *in_buf, + int in_len, struct folio *out_folio, + size_t out_offset, int *out_len, int compr_type) +{ + struct ubifs_compressor *compr; + + if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) { + ubifs_err(c, "invalid compression type %d", compr_type); + return -EINVAL; + } + + compr = ubifs_compressors[compr_type]; + + if (unlikely(!compr->capi_name)) { + ubifs_err(c, "%s compression is not compiled in", compr->name); + return -EINVAL; + } + + if (compr_type == UBIFS_COMPR_NONE) { + memcpy_to_folio(out_folio, out_offset, in_buf, in_len); + *out_len = in_len; + return 0; + } + + { + ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN); + + acomp_request_set_dst_folio(req, out_folio, out_offset, + *out_len); + return ubifs_decompress_req(c, req, in_buf, in_len, out_len, + compr->name); + } } /** @@ -199,7 +323,7 @@ int ubifs_decompress(const struct ubifs_info *c, const void *in_buf, static int __init compr_init(struct ubifs_compressor *compr) { if (compr->capi_name) { - compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0); + compr->cc = crypto_alloc_acomp(compr->capi_name, 0, 0); if (IS_ERR(compr->cc)) { pr_err("UBIFS error (pid %d): cannot initialize compressor %s, error %ld", current->pid, compr->name, PTR_ERR(compr->cc)); @@ -218,7 +342,7 @@ static int __init compr_init(struct ubifs_compressor *compr) static void compr_exit(struct ubifs_compressor *compr) { if (compr->capi_name) - crypto_free_comp(compr->cc); + crypto_free_acomp(compr->cc); } /** diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 5130123005e4..bf311c38d9a8 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -42,8 +42,8 @@ #include <linux/slab.h> #include <linux/migrate.h> -static int read_block(struct inode *inode, void *addr, unsigned int block, - struct ubifs_data_node *dn) +static int read_block(struct inode *inode, struct folio *folio, size_t offset, + unsigned int block, struct ubifs_data_node *dn) { struct ubifs_info *c = inode->i_sb->s_fs_info; int err, len, out_len; @@ -55,7 +55,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, if (err) { if (err == -ENOENT) /* Not found, so it must be a hole */ - memset(addr, 0, UBIFS_BLOCK_SIZE); + folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE); return err; } @@ -74,8 +74,8 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, } out_len = UBIFS_BLOCK_SIZE; - err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, - le16_to_cpu(dn->compr_type)); + err = ubifs_decompress_folio(c, &dn->data, dlen, folio, offset, + &out_len, le16_to_cpu(dn->compr_type)); if (err || len != out_len) goto dump; @@ -85,7 +85,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block, * appending data). Ensure that the remainder is zeroed out. 
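
With folios, the read path stops kmapping the page and doing pointer arithmetic; it passes a (folio, offset) pair downward and lets folio_zero_range() and the decompressor map what they need. The per-block walk reduces to plain offset stepping, roughly::

    /* Sketch with assumed names; read_one_block() is hypothetical. */
    size_t offset = 0;
    unsigned int block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
    int i;

    for (i = 0; i < (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)); i++) {
        read_one_block(inode, folio, offset, block);
        block  += 1;
        offset += UBIFS_BLOCK_SIZE;
    }
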
*/ if (len < UBIFS_BLOCK_SIZE) - memset(addr + len, 0, UBIFS_BLOCK_SIZE - len); + folio_zero_range(folio, offset + len, UBIFS_BLOCK_SIZE - len); return 0; @@ -98,27 +98,25 @@ dump: static int do_readpage(struct folio *folio) { - void *addr; int err = 0, i; unsigned int block, beyond; struct ubifs_data_node *dn = NULL; struct inode *inode = folio->mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; loff_t i_size = i_size_read(inode); + size_t offset = 0; dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", inode->i_ino, folio->index, i_size, folio->flags); ubifs_assert(c, !folio_test_checked(folio)); ubifs_assert(c, !folio->private); - addr = kmap_local_folio(folio, 0); - block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT; if (block >= beyond) { /* Reading beyond inode */ folio_set_checked(folio); - addr = folio_zero_tail(folio, 0, addr); + folio_zero_range(folio, 0, folio_size(folio)); goto out; } @@ -135,9 +133,9 @@ static int do_readpage(struct folio *folio) if (block >= beyond) { /* Reading beyond inode */ err = -ENOENT; - memset(addr, 0, UBIFS_BLOCK_SIZE); + folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE); } else { - ret = read_block(inode, addr, block, dn); + ret = read_block(inode, folio, offset, block, dn); if (ret) { err = ret; if (err != -ENOENT) @@ -147,17 +145,13 @@ static int do_readpage(struct folio *folio) int ilen = i_size & (UBIFS_BLOCK_SIZE - 1); if (ilen && ilen < dlen) - memset(addr + ilen, 0, dlen - ilen); + folio_zero_range(folio, offset + ilen, dlen - ilen); } } if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio))) break; block += 1; - addr += UBIFS_BLOCK_SIZE; - if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) { - kunmap_local(addr - UBIFS_BLOCK_SIZE); - addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE); - } + offset += UBIFS_BLOCK_SIZE; } if (err) { @@ -177,8 +171,6 @@ out: kfree(dn); if (!err) folio_mark_uptodate(folio); - flush_dcache_folio(folio); - kunmap_local(addr); return err; } @@ -602,18 +594,16 @@ static int populate_page(struct ubifs_info *c, struct folio *folio, struct inode *inode = folio->mapping->host; loff_t i_size = i_size_read(inode); unsigned int page_block; - void *addr, *zaddr; + size_t offset = 0; pgoff_t end_index; dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx", inode->i_ino, folio->index, i_size, folio->flags); - addr = zaddr = kmap_local_folio(folio, 0); - end_index = (i_size - 1) >> PAGE_SHIFT; if (!i_size || folio->index > end_index) { hole = 1; - addr = folio_zero_tail(folio, 0, addr); + folio_zero_range(folio, 0, folio_size(folio)); goto out_hole; } @@ -623,7 +613,7 @@ static int populate_page(struct ubifs_info *c, struct folio *folio, if (nn >= bu->cnt) { hole = 1; - memset(addr, 0, UBIFS_BLOCK_SIZE); + folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE); } else if (key_block(c, &bu->zbranch[nn].key) == page_block) { struct ubifs_data_node *dn; @@ -645,13 +635,15 @@ static int populate_page(struct ubifs_info *c, struct folio *folio, goto out_err; } - err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, - le16_to_cpu(dn->compr_type)); + err = ubifs_decompress_folio( + c, &dn->data, dlen, folio, offset, &out_len, + le16_to_cpu(dn->compr_type)); if (err || len != out_len) goto out_err; if (len < UBIFS_BLOCK_SIZE) - memset(addr + len, 0, UBIFS_BLOCK_SIZE - len); + folio_zero_range(folio, offset + len, + UBIFS_BLOCK_SIZE - len); nn += 1; read = (i << UBIFS_BLOCK_SHIFT) + len; @@ -660,23 +652,19 @@ static int populate_page(struct ubifs_info 
*c, struct folio *folio, continue; } else { hole = 1; - memset(addr, 0, UBIFS_BLOCK_SIZE); + folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE); } if (++i >= UBIFS_BLOCKS_PER_PAGE) break; - addr += UBIFS_BLOCK_SIZE; + offset += UBIFS_BLOCK_SIZE; page_block += 1; - if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) { - kunmap_local(addr - UBIFS_BLOCK_SIZE); - addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE); - } } if (end_index == folio->index) { int len = i_size & (PAGE_SIZE - 1); if (len && len < read) - memset(zaddr + len, 0, read - len); + folio_zero_range(folio, len, read - len); } out_hole: @@ -686,14 +674,10 @@ out_hole: } folio_mark_uptodate(folio); - flush_dcache_folio(folio); - kunmap_local(addr); *n = nn; return 0; out_err: - flush_dcache_folio(folio); - kunmap_local(addr); ubifs_err(c, "bad data node (block %u, inode %lu)", page_block, inode->i_ino); return -EINVAL; @@ -898,7 +882,6 @@ static int do_writepage(struct folio *folio, size_t len) { int err = 0, blen; unsigned int block; - void *addr; size_t offset = 0; union ubifs_key key; struct inode *inode = folio->mapping->host; @@ -913,26 +896,19 @@ static int do_writepage(struct folio *folio, size_t len) folio_start_writeback(folio); - addr = kmap_local_folio(folio, offset); block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT; for (;;) { blen = min_t(size_t, len, UBIFS_BLOCK_SIZE); data_key_init(c, &key, inode->i_ino, block); - err = ubifs_jnl_write_data(c, inode, &key, addr, blen); + err = ubifs_jnl_write_data(c, inode, &key, folio, offset, blen); if (err) break; len -= blen; if (!len) break; block += 1; - addr += blen; - if (folio_test_highmem(folio) && !offset_in_page(addr)) { - kunmap_local(addr - blen); - offset += PAGE_SIZE; - addr = kmap_local_folio(folio, offset); - } + offset += blen; } - kunmap_local(addr); if (err) { mapping_set_error(folio->mapping, err); ubifs_err(c, "cannot write folio %lu of inode %lu, error %d", diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 36ba79fbd2ff..ee954e64ce7f 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -845,14 +845,16 @@ out_ro: * @c: UBIFS file-system description object * @inode: inode the data node belongs to * @key: node key - * @buf: buffer to write + * @folio: buffer to write + * @offset: offset to write at * @len: data length (must not exceed %UBIFS_BLOCK_SIZE) * * This function writes a data node to the journal. Returns %0 if the data node * was successfully written, and a negative error code in case of failure. 
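
Handing the journal a folio plus offset lets ubifs_compress_folio() read the data in place; where a plain copy is needed, the folio helpers do any highmem mapping internally, which is why the kmap_local juggling disappears from do_writepage()::

    /* highmem.h helpers used by the new UBIFS paths. */
    memcpy_from_folio(out_buf, folio, offset, len);  /* folio -> buffer */
    memcpy_to_folio(folio, offset, in_buf, len);     /* buffer -> folio */
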
*/ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, - const union ubifs_key *key, const void *buf, int len) + const union ubifs_key *key, struct folio *folio, + size_t offset, int len) { struct ubifs_data_node *data; int err, lnum, offs, compr_type, out_len, compr_len, auth_len; @@ -896,7 +898,8 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, compr_type = ui->compr_type; out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ; - ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type); + ubifs_compress_folio(c, folio, offset, len, &data->data, &compr_len, + &compr_type); ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE); if (encrypted) { @@ -1625,7 +1628,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in int err, dlen, compr_type, out_len, data_size; out_len = le32_to_cpu(dn->size); - buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS); + buf = kmalloc(out_len, GFP_NOFS); if (!buf) return -ENOMEM; diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 3375bbe0508c..256dbaeeb0de 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -124,13 +124,6 @@ #define OLD_ZNODE_AGE 20 #define YOUNG_ZNODE_AGE 5 -/* - * Some compressors, like LZO, may end up with more data then the input buffer. - * So UBIFS always allocates larger output buffer, to be sure the compressor - * will not corrupt memory in case of worst case compression. - */ -#define WORST_COMPR_FACTOR 2 - #ifdef CONFIG_FS_ENCRYPTION #define UBIFS_CIPHER_BLOCK_SIZE FSCRYPT_CONTENTS_ALIGNMENT #else @@ -141,7 +134,7 @@ * How much memory is needed for a buffer where we compress a data node. */ #define COMPRESSED_DATA_NODE_BUF_SZ \ - (UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR) + (UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE) /* Maximum expected tree height for use by bottom_up_buf */ #define BOTTOM_UP_HEIGHT 64 @@ -270,6 +263,8 @@ enum { ASSACT_PANIC, }; +struct folio; + /** * struct ubifs_old_idx - index node obsoleted since last commit start. * @rb: rb-tree node @@ -835,16 +830,12 @@ struct ubifs_node_range { * struct ubifs_compressor - UBIFS compressor description structure. 
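
Dropping WORST_COMPR_FACTOR is safe because an acomp destination length is a hard limit: a compression that would expand past it fails rather than writing beyond the buffer, so truncate_data_node() no longer needs to over-allocate::

    /* Before: leave room for compressors that may expand their input. */
    buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);

    /* After: out_len is enforced by the acomp API itself. */
    buf = kmalloc(out_len, GFP_NOFS);
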
* @compr_type: compressor type (%UBIFS_COMPR_LZO, etc) * @cc: cryptoapi compressor handle - * @comp_mutex: mutex used during compression - * @decomp_mutex: mutex used during decompression * @name: compressor name * @capi_name: cryptoapi compressor name */ struct ubifs_compressor { int compr_type; - struct crypto_comp *cc; - struct mutex *comp_mutex; - struct mutex *decomp_mutex; + struct crypto_acomp *cc; const char *name; const char *capi_name; }; @@ -1795,7 +1786,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, const struct fscrypt_name *nm, const struct inode *inode, int deletion, int xent, int in_orphan); int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode, - const union ubifs_key *key, const void *buf, int len); + const union ubifs_key *key, struct folio *folio, + size_t offset, int len); int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode); int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode); int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir, @@ -2095,8 +2087,14 @@ int __init ubifs_compressors_init(void); void ubifs_compressors_exit(void); void ubifs_compress(const struct ubifs_info *c, const void *in_buf, int in_len, void *out_buf, int *out_len, int *compr_type); +void ubifs_compress_folio(const struct ubifs_info *c, struct folio *folio, + size_t offset, int in_len, void *out_buf, + int *out_len, int *compr_type); int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len, void *out, int *out_len, int compr_type); +int ubifs_decompress_folio(const struct ubifs_info *c, const void *buf, + int len, struct folio *folio, size_t offset, + int *out_len, int compr_type); /* sysfs.c */ int ubifs_sysfs_init(void); diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 54937b615239..c497c73baf13 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -10,30 +10,105 @@ #define _CRYPTO_ACOMP_H #include <linux/atomic.h> +#include <linux/args.h> +#include <linux/compiler_types.h> #include <linux/container_of.h> #include <linux/crypto.h> +#include <linux/err.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> + +/* Set this bit if source is virtual address instead of SG list. */ +#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002 + +/* Set this bit for if virtual address source cannot be used for DMA. */ +#define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004 + +/* Set this bit if destination is virtual address instead of SG list. */ +#define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008 + +/* Set this bit for if virtual address destination cannot be used for DMA. */ +#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010 + +/* Set this bit if source is a folio. */ +#define CRYPTO_ACOMP_REQ_SRC_FOLIO 0x00000020 + +/* Set this bit if destination is a folio. 
*/ +#define CRYPTO_ACOMP_REQ_DST_FOLIO 0x00000040 -#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 #define CRYPTO_ACOMP_DST_MAX 131072 +#define MAX_SYNC_COMP_REQSIZE 0 + +#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), (gfp), false) + +struct acomp_req; +struct folio; + +struct acomp_req_chain { + struct list_head head; + struct acomp_req *req0; + struct acomp_req *cur; + int (*op)(struct acomp_req *req); + crypto_completion_t compl; + void *data; + struct scatterlist ssg; + struct scatterlist dsg; + union { + const u8 *src; + struct folio *sfolio; + }; + union { + u8 *dst; + struct folio *dfolio; + }; + size_t soff; + size_t doff; + u32 flags; +}; + /** * struct acomp_req - asynchronous (de)compression request * * @base: Common attributes for asynchronous crypto requests - * @src: Source Data - * @dst: Destination data + * @src: Source scatterlist + * @dst: Destination scatterlist + * @svirt: Source virtual address + * @dvirt: Destination virtual address + * @sfolio: Source folio + * @soff: Source folio offset + * @dfolio: Destination folio + * @doff: Destination folio offset * @slen: Size of the input buffer * @dlen: Size of the output buffer and number of bytes produced - * @flags: Internal flags + * @chain: Private API code data, do not use * @__ctx: Start of private context data */ struct acomp_req { struct crypto_async_request base; - struct scatterlist *src; - struct scatterlist *dst; + union { + struct scatterlist *src; + const u8 *svirt; + struct folio *sfolio; + }; + union { + struct scatterlist *dst; + u8 *dvirt; + struct folio *dfolio; + }; + size_t soff; + size_t doff; unsigned int slen; unsigned int dlen; - u32 flags; + + struct acomp_req_chain chain; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -43,21 +118,26 @@ struct acomp_req { * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @dst_free: Frees destination buffer if allocated inside the - * algorithm * @reqsize: Context size for (de)compression requests + * @fb: Synchronous fallback tfm * @base: Common crypto API algorithm data structure */ struct crypto_acomp { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); - void (*dst_free)(struct scatterlist *dst); unsigned int reqsize; + struct crypto_acomp *fb; struct crypto_tfm base; }; +struct crypto_acomp_stream { + spinlock_t lock; + void *ctx; +}; + #define COMP_ALG_COMMON { \ struct crypto_alg base; \ + struct crypto_acomp_stream __percpu *stream; \ } struct comp_alg_common COMP_ALG_COMMON; @@ -168,14 +248,67 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) return crypto_has_alg(alg_name, type, mask); } +static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_name(crypto_acomp_tfm(tfm)); +} + +static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); +} + /** * acomp_request_alloc() -- allocates asynchronous (de)compression request * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL) * * Return: allocated handle in case of success or NULL in case of an error */ -struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); +static inline struct acomp_req 
*acomp_request_alloc_extra_noprof( + struct crypto_acomp *tfm, size_t extra, gfp_t gfp) +{ + struct acomp_req *req; + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + if (check_add_overflow(len, extra, &len)) + return NULL; + + req = kzalloc_noprof(len, gfp); + if (likely(req)) + acomp_request_set_tfm(req, tfm); + return req; +} +#define acomp_request_alloc_noprof(tfm, ...) \ + CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \ + tfm, ##__VA_ARGS__) +#define acomp_request_alloc_noprof_0(tfm) \ + acomp_request_alloc_noprof_1(tfm, GFP_KERNEL) +#define acomp_request_alloc_noprof_1(tfm, gfp) \ + acomp_request_alloc_extra_noprof(tfm, 0, gfp) +#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__)) + +/** + * acomp_request_alloc_extra() -- allocate acomp request with extra memory + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @extra: amount of extra memory + * @gfp: gfp to pass to kzalloc + * + * Return: allocated handle in case of success or NULL in case of an error + */ +#define acomp_request_alloc_extra(...) alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__)) + +static inline void *acomp_request_extra(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + return (void *)((char *)req + len); +} /** * acomp_request_free() -- zeroize and free asynchronous (de)compression @@ -184,7 +317,12 @@ struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); * * @req: request to free */ -void acomp_request_free(struct acomp_req *req); +static inline void acomp_request_free(struct acomp_req *req) +{ + if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK)) + return; + kfree_sensitive(req); +} /** * acomp_request_set_callback() -- Sets an asynchronous callback @@ -202,10 +340,17 @@ static inline void acomp_request_set_callback(struct acomp_req *req, crypto_completion_t cmpl, void *data) { + u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA | + CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO | + CRYPTO_TFM_REQ_ON_STACK; + req->base.complete = cmpl; req->base.data = data; - req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT; - req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= keep; + req->base.flags |= flgs & ~keep; + + crypto_reqchain_init(&req->base); } /** @@ -232,9 +377,191 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->slen = slen; req->dlen = dlen; - req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; - if (!req->dst) - req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_SRC_FOLIO | + CRYPTO_ACOMP_REQ_DST_FOLIO | + CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_DST_NONDMA); +} + +/** + * acomp_request_set_src_sg() -- Sets source scatterlist + * + * Sets source scatterlist required by an acomp operation. 
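+ *
+ * An end-to-end use of the request API might look like this sketch (names
+ * illustrative, error handling elided; the trailing gfp argument of
+ * acomp_request_alloc() is optional and defaults to GFP_KERNEL)::
+ *
+ *	tfm = crypto_alloc_acomp("deflate", 0, 0);
+ *	req = acomp_request_alloc(tfm);
+ *	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+ *	acomp_request_set_src_sg(req, src_sg, src_len);
+ *	acomp_request_set_dst_sg(req, dst_sg, dst_len);
+ *	err = crypto_acomp_compress(req);
+ *	acomp_request_free(req);
+ *	crypto_free_acomp(tfm);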
+ * + * @req: asynchronous compress request + * @src: pointer to input buffer scatterlist + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_sg(struct acomp_req *req, + struct scatterlist *src, + unsigned int slen) +{ + req->src = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; +} + +/** + * acomp_request_set_src_dma() -- Sets DMA source virtual address + * + * Sets source virtual address required by an acomp operation. + * The address must be usable for DMA. + * + * @req: asynchronous compress request + * @src: virtual address pointer to input buffer + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_dma(struct acomp_req *req, + const u8 *src, unsigned int slen) +{ + req->svirt = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +/** + * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address + * + * Sets source virtual address required by an acomp operation. + * The address cannot be used for DMA. + * + * @req: asynchronous compress request + * @src: virtual address pointer to input buffer + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_nondma(struct acomp_req *req, + const u8 *src, + unsigned int slen) +{ + req->svirt = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +/** + * acomp_request_set_src_folio() -- Sets source folio + * + * Sets source folio required by an acomp operation. + * + * @req: asynchronous compress request + * @folio: pointer to input folio + * @off: input folio offset + * @len: size of the input buffer + */ +static inline void acomp_request_set_src_folio(struct acomp_req *req, + struct folio *folio, size_t off, + unsigned int len) +{ + req->sfolio = folio; + req->soff = off; + req->slen = len; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO; +} + +/** + * acomp_request_set_dst_sg() -- Sets destination scatterlist + * + * Sets destination scatterlist required by an acomp operation. + * + * @req: asynchronous compress request + * @dst: pointer to output buffer scatterlist + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_sg(struct acomp_req *req, + struct scatterlist *dst, + unsigned int dlen) +{ + req->dst = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; +} + +/** + * acomp_request_set_dst_dma() -- Sets DMA destination virtual address + * + * Sets destination virtual address required by an acomp operation. + * The address must be usable for DMA.
+ * + * @req: asynchronous compress request + * @dst: virtual address pointer to output buffer + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_dma(struct acomp_req *req, + u8 *dst, unsigned int dlen) +{ + req->dvirt = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; +} + +/** + * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address + * + * Sets destination virtual address required by an acomp operation. + * The address cannot be used for DMA. + * + * @req: asynchronous compress request + * @dst: virtual address pointer to output buffer + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_nondma(struct acomp_req *req, + u8 *dst, unsigned int dlen) +{ + req->dvirt = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; +} + +/** + * acomp_request_set_dst_folio() -- Sets destination folio + * + * Sets destination folio required by an acomp operation. + * + * @req: asynchronous compress request + * @folio: pointer to output folio + * @off: output folio offset + * @len: size of the output buffer + */ +static inline void acomp_request_set_dst_folio(struct acomp_req *req, + struct folio *folio, size_t off, + unsigned int len) +{ + req->dfolio = folio; + req->doff = off; + req->dlen = len; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO; +} + +static inline void acomp_request_chain(struct acomp_req *req, + struct acomp_req *head) +{ + crypto_request_chain(&req->base, &head->base); } /** @@ -246,10 +573,7 @@ static inline void acomp_request_set_params(struct acomp_req *req, * * Return: zero on success; error code in case of error */ -static inline int crypto_acomp_compress(struct acomp_req *req) -{ - return crypto_acomp_reqtfm(req)->compress(req); -} +int crypto_acomp_compress(struct acomp_req *req); /** * crypto_acomp_decompress() -- Invoke asynchronous decompress operation * @@ -260,9 +584,21 @@ static inline int crypto_acomp_compress(struct acomp_req *req) * * Return: zero on success; error code in case of error */ -static inline int crypto_acomp_decompress(struct acomp_req *req) +int crypto_acomp_decompress(struct acomp_req *req); + +static inline struct acomp_req *acomp_request_on_stack_init( + char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly) { - return crypto_acomp_reqtfm(req)->decompress(req); + struct acomp_req *req; + + if (!stackonly && (req = acomp_request_alloc(tfm, gfp))) + return req; + + req = (void *)buf; + acomp_request_set_tfm(req, tfm->fb); + req->base.flags = CRYPTO_TFM_REQ_ON_STACK; + + return req; } #endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 156de41ca760..6e07bbc04089 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -11,6 +11,7 @@ #include <linux/align.h> #include <linux/cache.h> #include <linux/crypto.h> +#include <linux/list.h> #include <linux/types.h> #include <linux/workqueue.h> @@ -53,20 +54,7 @@ struct rtattr; struct scatterlist; struct seq_file; struct sk_buff; - -struct crypto_type { - unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); - unsigned int (*extsize)(struct crypto_alg *alg); - int (*init_tfm)(struct crypto_tfm *tfm); - void (*show)(struct
seq_file *m, struct crypto_alg *alg); - int (*report)(struct sk_buff *skb, struct crypto_alg *alg); - void (*free)(struct crypto_instance *inst); - - unsigned int type; - unsigned int maskclear; - unsigned int maskset; - unsigned int tfmsize; -}; +union crypto_no_such_thing; struct crypto_instance { struct crypto_alg alg; @@ -119,6 +107,13 @@ struct crypto_queue { }; struct scatter_walk { + /* Must be the first member, see struct skcipher_walk. */ + union { + void *const addr; + + /* Private API field, do not touch. */ + union crypto_no_such_thing *__addr; + }; struct scatterlist *sg; unsigned int offset; }; @@ -271,4 +266,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; } +static inline bool crypto_request_chained(struct crypto_async_request *req) +{ + return !list_empty(&req->list); +} + +static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN; +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h index 5f92a986083c..15a9caa2354a 100644 --- a/include/crypto/authenc.h +++ b/include/crypto/authenc.h @@ -28,5 +28,7 @@ struct crypto_authenc_keys { int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, unsigned int keylen); +int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); #endif /* _CRYPTO_AUTHENC_H */ diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 5bae6a55b333..f8cc073bba41 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -62,8 +62,7 @@ static inline void chacha_init_consts(u32 *state) state[3] = CHACHA_CONSTANT_TE_K; } -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); -static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) +static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) { chacha_init_consts(state); state[4] = key[0]; @@ -80,14 +79,6 @@ static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) state[15] = get_unaligned_le32(iv + 12); } -static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) -{ - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) - chacha_init_arch(state, key, iv); - else - chacha_init_generic(state, key, iv); -} - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds); void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h index a1c66d1001af..da1ee73e9ce9 100644 --- a/include/crypto/ctr.h +++ b/include/crypto/ctr.h @@ -34,8 +34,8 @@ static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes > 0) { + const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - u8 *src = walk.src.virt.addr; int nbytes = walk.nbytes; int tail = 0; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 2d5ea9f9ff43..2aa83ee0ec98 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -12,6 +12,9 @@ #include <linux/crypto.h> #include <linux/string.h> +/* Set this bit for virtual address instead of SG list. 
*/ +#define CRYPTO_AHASH_REQ_VIRT 0x00000001 + struct crypto_ahash; /** @@ -52,12 +55,12 @@ struct ahash_request { struct crypto_async_request base; unsigned int nbytes; - struct scatterlist *src; + union { + struct scatterlist *src; + const u8 *svirt; + }; u8 *result; - /* This field may only be used by the ahash API code. */ - void *priv; - void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -132,6 +135,7 @@ struct ahash_request { * This is a counterpart to @init_tfm, used to remove * various changes set in @init_tfm. * @clone_tfm: Copy transform into new object, may allocate memory. + * @reqsize: Size of the request context. * @halg: see struct hash_alg_common */ struct ahash_alg { @@ -148,6 +152,8 @@ struct ahash_alg { void (*exit_tfm)(struct crypto_ahash *tfm); int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src); + unsigned int reqsize; + struct hash_alg_common halg; }; @@ -575,16 +581,7 @@ static inline struct ahash_request *ahash_request_alloc_noprof( * ahash_request_free() - zeroize and free the request data structure * @req: request data structure cipher handle to be freed */ -static inline void ahash_request_free(struct ahash_request *req) -{ - kfree_sensitive(req); -} - -static inline void ahash_request_zero(struct ahash_request *req) -{ - memzero_explicit(req, sizeof(*req) + - crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); -} +void ahash_request_free(struct ahash_request *req); static inline struct ahash_request *ahash_request_cast( struct crypto_async_request *req) @@ -622,9 +619,14 @@ static inline void ahash_request_set_callback(struct ahash_request *req, crypto_completion_t compl, void *data) { + u32 keep = CRYPTO_AHASH_REQ_VIRT; + req->base.complete = compl; req->base.data = data; - req->base.flags = flags; + flags &= ~keep; + req->base.flags &= keep; + req->base.flags |= flags; + crypto_reqchain_init(&req->base); } /** @@ -647,6 +649,36 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->src = src; req->nbytes = nbytes; req->result = result; + req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT; +} + +/** + * ahash_request_set_virt() - set virtual address data buffers + * @req: ahash_request handle to be updated + * @src: source virtual address + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source virtual address + * + * By using this call, the caller references the source virtual address. + * The source virtual address points to the data the message digest is to + * be calculated for. 
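+ *
+ * A one-shot digest over a virtual buffer might then look like this sketch
+ * (assuming a synchronous tfm, or a caller prepared to handle
+ * -EINPROGRESS)::
+ *
+ *	ahash_request_set_callback(req, 0, NULL, NULL);
+ *	ahash_request_set_virt(req, buf, digest, len);
+ *	err = crypto_ahash_digest(req);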
+ */ +static inline void ahash_request_set_virt(struct ahash_request *req, + const u8 *src, u8 *result, + unsigned int nbytes) +{ + req->svirt = src; + req->nbytes = nbytes; + req->result = result; + req->base.flags |= CRYPTO_AHASH_REQ_VIRT; +} + +static inline void ahash_request_chain(struct ahash_request *req, + struct ahash_request *head) +{ + crypto_request_chain(&req->base, &head->base); } /** @@ -950,4 +982,14 @@ static inline void shash_desc_zero(struct shash_desc *desc) sizeof(*desc) + crypto_shash_descsize(desc->tfm)); } +static inline int ahash_request_err(struct ahash_request *req) +{ + return req->base.err; +} + +static inline bool ahash_is_async(struct crypto_ahash *tfm) +{ + return crypto_tfm_is_async(&tfm->base); +} + #endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 8831edaafc05..aaf59f3236fa 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -12,12 +12,17 @@ #include <crypto/acompress.h> #include <crypto/algapi.h> +#define ACOMP_REQUEST_ON_STACK(name, tfm) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), 0, true) + /** * struct acomp_alg - asynchronous compression algorithm * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @dst_free: Frees destination buffer if allocated inside the algorithm * @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic * transformation object. This function is called only once at @@ -32,12 +37,12 @@ * * @reqsize: Context size for (de)compression requests * @base: Common crypto API algorithm data structure + * @stream: Per-cpu memory for algorithm * @calg: Common algorithm data structure shared with scomp */ struct acomp_alg { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); - void (*dst_free)(struct scatterlist *dst); int (*init)(struct crypto_acomp *tfm); void (*exit)(struct crypto_acomp *tfm); @@ -68,22 +73,6 @@ static inline void acomp_request_complete(struct acomp_req *req, crypto_request_complete(&req->base, err); } -static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm) -{ - struct acomp_req *req; - - req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); - if (likely(req)) - acomp_request_set_tfm(req, tfm); - return req; -} -#define __acomp_request_alloc(...)
alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__)) - -static inline void __acomp_request_free(struct acomp_req *req) -{ - kfree_sensitive(req); -} - /** * crypto_register_acomp() -- Register asynchronous compression algorithm * @@ -109,4 +98,64 @@ void crypto_unregister_acomp(struct acomp_alg *alg); int crypto_register_acomps(struct acomp_alg *algs, int count); void crypto_unregister_acomps(struct acomp_alg *algs, int count); +static inline bool acomp_request_chained(struct acomp_req *req) +{ + return crypto_request_chained(&req->base); +} + +static inline bool acomp_request_issg(struct acomp_req *req) +{ + return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_SRC_FOLIO | + CRYPTO_ACOMP_REQ_DST_FOLIO)); +} + +static inline bool acomp_request_src_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +static inline bool acomp_request_dst_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT; +} + +static inline bool acomp_request_isvirt(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT); +} + +static inline bool acomp_request_src_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA; +} + +static inline bool acomp_request_dst_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA; +} + +static inline bool acomp_request_isnondma(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_NONDMA); +} + +static inline bool acomp_request_src_isfolio(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO; +} + +static inline bool acomp_request_dst_isfolio(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO; +} + +static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm) +{ + return crypto_tfm_req_chain(&tfm->base); +} + #endif diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 84da3424decc..485e22cf517e 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -247,5 +247,20 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_shash, base); } +static inline bool ahash_request_chained(struct ahash_request *req) +{ + return crypto_request_chained(&req->base); +} + +static inline bool ahash_request_isvirt(struct ahash_request *req) +{ + return req->base.flags & CRYPTO_AHASH_REQ_VIRT; +} + +static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm) +{ + return crypto_tfm_req_chain(&tfm->base); +} + #endif /* _CRYPTO_INTERNAL_HASH_H */ diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 07a10fd2d321..f25aa2ea3b48 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -12,8 +12,6 @@ #include <crypto/acompress.h> #include <crypto/algapi.h> -#define SCOMP_SCRATCH_SIZE 131072 - struct acomp_req; struct crypto_scomp { @@ -28,11 +26,12 @@ * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation * @base: Common crypto API algorithm data structure + * @stream: Per-cpu memory for algorithm * @calg: Common algorithm data structure shared with acomp */ struct scomp_alg { - void *(*alloc_ctx)(struct crypto_scomp *tfm); - void (*free_ctx)(struct crypto_scomp *tfm, void
*ctx); + void *(*alloc_ctx)(void); + void (*free_ctx)(void *ctx); int (*compress)(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); @@ -71,17 +70,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); } -static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) -{ - return crypto_scomp_alg(tfm)->alloc_ctx(tfm); -} - -static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, - void *ctx) -{ - return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); -} - static inline int crypto_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 4f49621d3eb6..a958ab0636ad 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -56,15 +56,31 @@ struct crypto_lskcipher_spawn { struct skcipher_walk { union { + /* Virtual address of the source. */ struct { - void *addr; - } virt; - } src, dst; + struct { + const void *const addr; + } virt; + } src; + + /* Private field for the API, do not use. */ + struct scatter_walk in; + }; - struct scatter_walk in; unsigned int nbytes; - struct scatter_walk out; + union { + /* Virtual address of the destination. */ + struct { + struct { + void *const addr; + } virt; + } dst; + + /* Private field for the API, do not use. */ + struct scatter_walk out; + }; + unsigned int total; u8 *page; @@ -197,13 +213,15 @@ int lskcipher_register_instance(struct crypto_template *tmpl, struct lskcipher_instance *inst); int skcipher_walk_done(struct skcipher_walk *walk, int res); -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, +int skcipher_walk_virt(struct skcipher_walk *__restrict walk, + struct skcipher_request *__restrict req, bool atomic); -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); +int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); static inline void skcipher_walk_abort(struct skcipher_walk *walk) { diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h new file mode 100644 index 000000000000..62d998e62f47 --- /dev/null +++ b/include/crypto/krb5.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Kerberos 5 crypto + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _CRYPTO_KRB5_H +#define _CRYPTO_KRB5_H + +#include <linux/crypto.h> +#include <crypto/aead.h> +#include <crypto/hash.h> + +struct crypto_shash; +struct scatterlist; + +/* + * Per Kerberos v5 protocol spec crypto types from the wire. These get mapped + * to linux kernel crypto routines. + */ +#define KRB5_ENCTYPE_NULL 0x0000 +#define KRB5_ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */ +#define KRB5_ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */ +#define KRB5_ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */ +#define KRB5_ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */ +/* XXX deprecated? 
*/ +#define KRB5_ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */ +#define KRB5_ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */ +#define KRB5_ENCTYPE_DES_HMAC_SHA1 0x0008 +#define KRB5_ENCTYPE_DES3_CBC_SHA1 0x0010 +#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011 +#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012 +#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013 +#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014 +#define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017 +#define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 +#define KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019 +#define KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC 0x001a +#define KRB5_ENCTYPE_UNKNOWN 0x01ff + +#define KRB5_CKSUMTYPE_CRC32 0x0001 +#define KRB5_CKSUMTYPE_RSA_MD4 0x0002 +#define KRB5_CKSUMTYPE_RSA_MD4_DES 0x0003 +#define KRB5_CKSUMTYPE_DESCBC 0x0004 +#define KRB5_CKSUMTYPE_RSA_MD5 0x0007 +#define KRB5_CKSUMTYPE_RSA_MD5_DES 0x0008 +#define KRB5_CKSUMTYPE_NIST_SHA 0x0009 +#define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c +#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f +#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 +#define KRB5_CKSUMTYPE_CMAC_CAMELLIA128 0x0011 +#define KRB5_CKSUMTYPE_CMAC_CAMELLIA256 0x0012 +#define KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013 +#define KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014 +#define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ + +/* + * Constants used for key derivation + */ +/* from rfc3961 */ +#define KEY_USAGE_SEED_CHECKSUM (0x99) +#define KEY_USAGE_SEED_ENCRYPTION (0xAA) +#define KEY_USAGE_SEED_INTEGRITY (0x55) + +/* + * Mode of operation. + */ +enum krb5_crypto_mode { + KRB5_CHECKSUM_MODE, /* Checksum only */ + KRB5_ENCRYPT_MODE, /* Fully encrypted, possibly with integrity checksum */ +}; + +struct krb5_buffer { + unsigned int len; + void *data; +}; + +/* + * Kerberos encoding type definition. 
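+ *
+ * Callers normally choose an encoding by protocol constant and pass key
+ * material around wrapped in a struct krb5_buffer, e.g. (sketch; the key
+ * bytes are placeholders)::
+ *
+ *	u8 key_bytes[32];
+ *	struct krb5_buffer key = {
+ *		.len	= sizeof(key_bytes),
+ *		.data	= key_bytes,
+ *	};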
+ */ +struct krb5_enctype { + int etype; /* Encryption (key) type */ + int ctype; /* Checksum type */ + const char *name; /* "Friendly" name */ + const char *encrypt_name; /* Crypto encrypt+checksum name */ + const char *cksum_name; /* Crypto checksum name */ + const char *hash_name; /* Crypto hash name */ + const char *derivation_enc; /* Cipher used in key derivation */ + u16 block_len; /* Length of encryption block */ + u16 conf_len; /* Length of confounder (normally == block_len) */ + u16 cksum_len; /* Length of checksum */ + u16 key_bytes; /* Length of raw key, in bytes */ + u16 key_len; /* Length of final key, in bytes */ + u16 hash_len; /* Length of hash in bytes */ + u16 prf_len; /* Length of PRF() result in bytes */ + u16 Kc_len; /* Length of Kc in bytes */ + u16 Ke_len; /* Length of Ke in bytes */ + u16 Ki_len; /* Length of Ki in bytes */ + bool keyed_cksum; /* T if a keyed cksum */ + + const struct krb5_crypto_profile *profile; + + int (*random_to_key)(const struct krb5_enctype *krb5, + const struct krb5_buffer *in, + struct krb5_buffer *out); /* complete key generation */ +}; + +/* + * krb5_api.c + */ +const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype); +size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t data_size, size_t *_offset); +size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_buffer_size, size_t *_offset); +void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_offset, size_t *_len); +struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); +struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); +ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); +int crypto_krb5_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len); +int crypto_krb5_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + +/* + * krb5_kdf.c + */ +int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5, + const struct krb5_buffer *K, + unsigned int L, + const struct krb5_buffer *S, + struct krb5_buffer *result, + gfp_t gfp); + +#endif /* _CRYPTO_KRB5_H */ diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 32fc4473175b..94a8585f26b2 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -26,76 +26,218 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head, sg_mark_end(head); } -static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +/* + * This is equivalent to scatterwalk_start(walk, sg) followed by + * 
scatterwalk_skip(walk, pos). + */ +static inline void scatterwalk_start_at_pos(struct scatter_walk *walk, + struct scatterlist *sg, + unsigned int pos) { - unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; - unsigned int len_this_page = offset_in_page(~walk->offset) + 1; - return len_this_page > len ? len : len_this_page; + while (pos > sg->length) { + pos -= sg->length; + sg = sg_next(sg); + } + walk->sg = sg; + walk->offset = sg->offset + pos; } static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, unsigned int nbytes) { - unsigned int len_this_page = scatterwalk_pagelen(walk); - return nbytes > len_this_page ? len_this_page : nbytes; + unsigned int len_this_sg; + unsigned int limit; + + if (walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); + len_this_sg = walk->sg->offset + walk->sg->length - walk->offset; + + /* + * HIGHMEM case: the page may have to be mapped into memory. To avoid + * the complexity of having to map multiple pages at once per sg entry, + * clamp the returned length to not cross a page boundary. + * + * !HIGHMEM case: no mapping is needed; all pages of the sg entry are + * already mapped contiguously in the kernel's direct map. For improved + * performance, allow the walker to return data segments that cross a + * page boundary. Do still cap the length to PAGE_SIZE, since some + * users rely on that to avoid disabling preemption for too long when + * using SIMD. It's also needed for when skcipher_walk uses a bounce + * page due to the data not being aligned to the algorithm's alignmask. + */ + if (IS_ENABLED(CONFIG_HIGHMEM)) + limit = PAGE_SIZE - offset_in_page(walk->offset); + else + limit = PAGE_SIZE; + + return min3(nbytes, len_this_sg, limit); } -static inline void scatterwalk_advance(struct scatter_walk *walk, - unsigned int nbytes) +/* + * Create a scatterlist that represents the remaining data in a walk. Uses + * chaining to reference the original scatterlist, so this uses at most two + * entries in @sg_out regardless of the number of entries in the original list. + * Assumes that sg_init_table() was already done. + */ +static inline void scatterwalk_get_sglist(struct scatter_walk *walk, + struct scatterlist sg_out[2]) { - walk->offset += nbytes; + if (walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); + sg_set_page(sg_out, sg_page(walk->sg), + walk->sg->offset + walk->sg->length - walk->offset, + walk->offset); + scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2); } -static inline struct page *scatterwalk_page(struct scatter_walk *walk) +static inline void scatterwalk_map(struct scatter_walk *walk) { - return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); + struct page *base_page = sg_page(walk->sg); + unsigned int offset = walk->offset; + void *addr; + + if (IS_ENABLED(CONFIG_HIGHMEM)) { + struct page *page; + + page = nth_page(base_page, offset >> PAGE_SHIFT); + offset = offset_in_page(offset); + addr = kmap_local_page(page) + offset; + } else { + /* + * When !HIGHMEM we allow the walker to return segments that + * span a page boundary; see scatterwalk_clamp(). To make it + * clear that in this case we're working in the linear buffer of + * the whole sg entry in the kernel's direct map rather than + * within the mapped buffer of a single page, compute the + * address as an offset from the page_address() of the first + * page of the sg entry. 
Either way the result is the address + * in the direct map, but this makes it clearer what is really + * going on. + */ + addr = page_address(base_page) + offset; + } + + walk->__addr = addr; } -static inline void scatterwalk_unmap(void *vaddr) +/** + * scatterwalk_next() - Get the next data buffer in a scatterlist walk + * @walk: the scatter_walk + * @total: the total number of bytes remaining, > 0 + * + * A virtual address for the next segment of data from the scatterlist will + * be placed into @walk->addr. The caller must call scatterwalk_done_src() + * or scatterwalk_done_dst() when it is done using this virtual address. + * + * Returns: the next number of bytes available, <= @total + */ +static inline unsigned int scatterwalk_next(struct scatter_walk *walk, + unsigned int total) { - kunmap_local(vaddr); + unsigned int nbytes = scatterwalk_clamp(walk, total); + + scatterwalk_map(walk); + return nbytes; } -static inline void scatterwalk_start(struct scatter_walk *walk, - struct scatterlist *sg) +static inline void scatterwalk_unmap(struct scatter_walk *walk) { - walk->sg = sg; - walk->offset = sg->offset; + if (IS_ENABLED(CONFIG_HIGHMEM)) + kunmap_local(walk->__addr); } -static inline void *scatterwalk_map(struct scatter_walk *walk) +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) { - return kmap_local_page(scatterwalk_page(walk)) + - offset_in_page(walk->offset); + walk->offset += nbytes; } -static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, - unsigned int more) +/** + * scatterwalk_done_src() - Finish one step of a walk of source scatterlist + * @walk: the scatter_walk + * @nbytes: the number of bytes processed this step, less than or equal to the + * number of bytes that scatterwalk_next() returned. + * + * Use this if the mapped address was not written to, i.e. it is source data. + */ +static inline void scatterwalk_done_src(struct scatter_walk *walk, + unsigned int nbytes) { - if (out) { - struct page *page; - - page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); - flush_dcache_page(page); - } - - if (more && walk->offset >= walk->sg->offset + walk->sg->length) - scatterwalk_start(walk, sg_next(walk->sg)); + scatterwalk_unmap(walk); + scatterwalk_advance(walk, nbytes); } -static inline void scatterwalk_done(struct scatter_walk *walk, int out, - int more) +/** + * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist + * @walk: the scatter_walk + * @nbytes: the number of bytes processed this step, less than or equal to the + * number of bytes that scatterwalk_next() returned. + * + * Use this if the mapped address may have been written to, i.e. it is + * destination data. + */ +static inline void scatterwalk_done_dst(struct scatter_walk *walk, + unsigned int nbytes) { - if (!more || walk->offset >= walk->sg->offset + walk->sg->length || - !(walk->offset & (PAGE_SIZE - 1))) - scatterwalk_pagedone(walk, out, more); + scatterwalk_unmap(walk); + /* + * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just + * relying on flush_dcache_page() being a no-op when not implemented, + * since otherwise the BUG_ON in sg_page() does not get optimized out. + * This also avoids having to consider whether the loop would get + * reliably optimized out or not. 
+ */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) { + struct page *base_page; + unsigned int offset; + int start, end, i; + + base_page = sg_page(walk->sg); + offset = walk->offset; + start = offset >> PAGE_SHIFT; + end = start + (nbytes >> PAGE_SHIFT); + end += (offset_in_page(offset) + offset_in_page(nbytes) + + PAGE_SIZE - 1) >> PAGE_SHIFT; + for (i = start; i < end; i++) + flush_dcache_page(nth_page(base_page, i)); + } + scatterwalk_advance(walk, nbytes); } -void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out); +void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes); + +void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, + unsigned int nbytes); + +void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, + unsigned int nbytes); -void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out); +void memcpy_from_sglist(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes); + +void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, + const void *buf, unsigned int nbytes); + +void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + +/* In new code, please use memcpy_{from,to}_sglist() directly instead. */ +static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, + unsigned int nbytes, int out) +{ + if (out) + memcpy_to_sglist(sg, start, buf, nbytes); + else + memcpy_from_sglist(buf, sg, start, nbytes); +} struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, diff --git a/include/crypto/sig.h b/include/crypto/sig.h index cff41ad93824..11024708c069 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -23,7 +23,8 @@ struct crypto_sig { * struct sig_alg - generic public key signature algorithm * * @sign: Function performs a sign operation as defined by public key - * algorithm. Optional. + * algorithm. On success, the signature size is returned. + * Optional. * @verify: Function performs a complete verify operation as defined by * public key algorithm, returning verification status. Optional. * @set_pub_key: Function invokes the algorithm specific set public key @@ -186,7 +187,7 @@ static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm) * @dst: destination buffer * @dlen: destination length * - * Return: zero on success; error code in case of error + * Return: signature size on success; error code in case of error */ static inline int crypto_sig_sign(struct crypto_sig *tfm, const void *src, unsigned int slen, diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 18a86e0af016..9e5853464345 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -214,16 +214,17 @@ struct lskcipher_alg { #define MAX_SYNC_SKCIPHER_REQSIZE 384 /* - * This performs a type-check against the "tfm" argument to make sure + * This performs a type-check against the "_tfm" argument to make sure * all users have the correct skcipher tfm for doing on-stack requests.
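 *
 * In use the pattern looks like this sketch (error handling elided; names
 * illustrative)::
 *
 *	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *	skcipher_request_set_sync_tfm(req, tfm);
 *	skcipher_request_set_callback(req, 0, NULL, NULL);
 *	skcipher_request_set_crypt(req, sg, sg, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_zero(req);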
*/ -#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \ +#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \ char __##name##_desc[sizeof(struct skcipher_request) + \ - MAX_SYNC_SKCIPHER_REQSIZE + \ - (!(sizeof((struct crypto_sync_skcipher *)1 == \ - (typeof(tfm))1))) \ + MAX_SYNC_SKCIPHER_REQSIZE \ ] CRYPTO_MINALIGN_ATTR; \ - struct skcipher_request *name = (void *)__##name##_desc + struct skcipher_request *name = \ + (((struct skcipher_request *)__##name##_desc)->base.tfm = \ + crypto_sync_skcipher_tfm((_tfm)), \ + (void *)__##name##_desc) /** * DOC: Symmetric Key Cipher API @@ -311,6 +312,12 @@ static inline struct crypto_tfm *crypto_lskcipher_tfm( return &tfm->base; } +static inline struct crypto_tfm *crypto_sync_skcipher_tfm( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_tfm(&tfm->base); +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h index e2fe4bd5f7f0..878beae6dc3b 100644 --- a/include/dt-bindings/reset/rockchip,rk3588-cru.h +++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ /* - * Copyright (c) 2021 Rockchip Electronics Co. Ltd. + * Copyright (c) 2021, 2024 Rockchip Electronics Co. Ltd. * Copyright (c) 2022 Collabora Ltd. * * Author: Elaine Zhang <zhangqing@rock-chips.com> @@ -753,4 +753,43 @@ #define SRST_A_HDMIRX_BIU 660 +/* SCMI Secure Resets */ + +/* Name=SECURE_SOFTRST_CON00,Offset=0xA00 */ +#define SCMI_SRST_A_SECURE_NS_BIU 10 +#define SCMI_SRST_H_SECURE_NS_BIU 11 +#define SCMI_SRST_A_SECURE_S_BIU 12 +#define SCMI_SRST_H_SECURE_S_BIU 13 +#define SCMI_SRST_P_SECURE_S_BIU 14 +#define SCMI_SRST_CRYPTO_CORE 15 +/* Name=SECURE_SOFTRST_CON01,Offset=0xA04 */ +#define SCMI_SRST_CRYPTO_PKA 16 +#define SCMI_SRST_CRYPTO_RNG 17 +#define SCMI_SRST_A_CRYPTO 18 +#define SCMI_SRST_H_CRYPTO 19 +#define SCMI_SRST_KEYLADDER_CORE 25 +#define SCMI_SRST_KEYLADDER_RNG 26 +#define SCMI_SRST_A_KEYLADDER 27 +#define SCMI_SRST_H_KEYLADDER 28 +#define SCMI_SRST_P_OTPC_S 29 +#define SCMI_SRST_OTPC_S 30 +#define SCMI_SRST_WDT_S 31 +/* Name=SECURE_SOFTRST_CON02,Offset=0xA08 */ +#define SCMI_SRST_T_WDT_S 32 +#define SCMI_SRST_H_BOOTROM 33 +#define SCMI_SRST_A_DCF 34 +#define SCMI_SRST_P_DCF 35 +#define SCMI_SRST_H_BOOTROM_NS 37 +#define SCMI_SRST_P_KEYLADDER 46 +#define SCMI_SRST_H_TRNG_S 47 +/* Name=SECURE_SOFTRST_CON03,Offset=0xA0C */ +#define SCMI_SRST_H_TRNG_NS 48 +#define SCMI_SRST_D_SDMMC_BUFFER 49 +#define SCMI_SRST_H_SDMMC 50 +#define SCMI_SRST_H_SDMMC_BUFFER 51 +#define SCMI_SRST_SDMMC 52 +#define SCMI_SRST_P_TRNG_CHK 53 +#define SCMI_SRST_TRNG_S 54 + + #endif diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 5cc73d7e5b52..1ca9f9e05f4f 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -168,11 +168,6 @@ async_xor_offs(struct page *dest, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum sum_check_flags *result, - struct async_submit_ctl *submit); - -struct dma_async_tx_descriptor * async_xor_val_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offset, int src_cnt, size_t len, enum sum_check_flags *result, diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b164da5e129e..1e3809d28abd 100644 
--- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -13,6 +13,8 @@ #define _LINUX_CRYPTO_H #include <linux/completion.h> +#include <linux/errno.h> +#include <linux/list.h> #include <linux/refcount.h> #include <linux/slab.h> #include <linux/types.h> @@ -22,7 +24,6 @@ */ #define CRYPTO_ALG_TYPE_MASK 0x0000000f #define CRYPTO_ALG_TYPE_CIPHER 0x00000001 -#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002 #define CRYPTO_ALG_TYPE_AEAD 0x00000003 #define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 @@ -124,6 +125,9 @@ */ #define CRYPTO_ALG_FIPS_INTERNAL 0x00020000 +/* Set if the algorithm supports request chains and virtual addresses. */ +#define CRYPTO_ALG_REQ_CHAIN 0x00040000 + /* * Transform masks and values (for crt_flags). */ @@ -133,6 +137,7 @@ #define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 +#define CRYPTO_TFM_REQ_ON_STACK 0x00000800 /* * Miscellaneous stuff. */ @@ -174,6 +179,7 @@ struct crypto_async_request { struct crypto_tfm *tfm; u32 flags; + int err; }; /** @@ -239,26 +245,7 @@ struct cipher_alg { void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); }; -/** - * struct compress_alg - compression/decompression algorithm - * @coa_compress: Compress a buffer of specified length, storing the resulting - * data in the specified buffer. Return the length of the - * compressed data in dlen. - * @coa_decompress: Decompress the source buffer, storing the uncompressed - * data in the specified buffer. The length of the data is - * returned in dlen. - * - * All fields are mandatory. - */ -struct compress_alg { - int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen); - int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src, - unsigned int slen, u8 *dst, unsigned int *dlen); -}; - #define cra_cipher cra_u.cipher -#define cra_compress cra_u.compress /** * struct crypto_alg - definition of a cryptographic cipher algorithm @@ -309,7 +296,7 @@ struct compress_alg { * transformation types. There are multiple options, such as * &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type. * This field might be empty. In that case, there are no common - * callbacks. This is the case for: cipher, compress, shash. + * callbacks. This is the case for: cipher. * @cra_u: Callbacks implementing the transformation. This is a union of * multiple structures. Depending on the type of transformation selected * by @cra_type and @cra_flags above, the associated structure must be @@ -328,8 +315,6 @@ struct compress_alg { * @cra_init. * @cra_u.cipher: Union member which contains a single-block symmetric cipher * definition. See @struct @cipher_alg. - * @cra_u.compress: Union member which contains a (de)compression algorithm. - * See @struct @compress_alg. * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE * @cra_list: internally used * @cra_users: internally used @@ -359,7 +344,6 @@ struct crypto_alg { union { struct cipher_alg cipher; - struct compress_alg compress; } cra_u; int (*cra_init)(struct crypto_tfm *tfm); @@ -433,10 +417,6 @@ struct crypto_tfm { void *__crt_ctx[] CRYPTO_MINALIGN_ATTR; }; -struct crypto_comp { - struct crypto_tfm base; -}; - /* * Transform user interface.
*/ @@ -493,52 +473,23 @@ static inline unsigned int crypto_tfm_ctx_alignment(void) return __alignof__(tfm->__crt_ctx); } -static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm) -{ - return (struct crypto_comp *)tfm; -} - -static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name, - u32 type, u32 mask) -{ - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_COMPRESS; - mask |= CRYPTO_ALG_TYPE_MASK; - - return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask)); -} - -static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm) +static inline void crypto_reqchain_init(struct crypto_async_request *req) { - return &tfm->base; + req->err = -EINPROGRESS; + INIT_LIST_HEAD(&req->list); } -static inline void crypto_free_comp(struct crypto_comp *tfm) +static inline void crypto_request_chain(struct crypto_async_request *req, + struct crypto_async_request *head) { - crypto_free_tfm(crypto_comp_tfm(tfm)); + req->err = -EINPROGRESS; + list_add_tail(&req->list, &head->list); } -static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask) +static inline bool crypto_tfm_is_async(struct crypto_tfm *tfm) { - type &= ~CRYPTO_ALG_TYPE_MASK; - type |= CRYPTO_ALG_TYPE_COMPRESS; - mask |= CRYPTO_ALG_TYPE_MASK; - - return crypto_has_alg(alg_name, type, mask); + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC; } -static inline const char *crypto_comp_name(struct crypto_comp *tfm) -{ - return crypto_tfm_alg_name(crypto_comp_tfm(tfm)); -} - -int crypto_comp_compress(struct crypto_comp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); - -int crypto_comp_decompress(struct crypto_comp *tfm, - const u8 *src, unsigned int slen, - u8 *dst, unsigned int *dlen); - #endif /* _LINUX_CRYPTO_H */ diff --git a/include/linux/lzo.h b/include/linux/lzo.h index e95c7d1092b2..4d30e3624acd 100644 --- a/include/linux/lzo.h +++ b/include/linux/lzo.h @@ -24,10 +24,18 @@ int lzo1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. */ +int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzorle1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. */ +int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* safe decompression with overrun testing */ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len); diff --git a/include/linux/math.h b/include/linux/math.h index f5f18dc3616b..0198c92cbe3e 100644 --- a/include/linux/math.h +++ b/include/linux/math.h @@ -34,6 +34,18 @@ */ #define round_down(x, y) ((x) & ~__round_mask(x, y)) +/** + * DIV_ROUND_UP_POW2 - divide and round up + * @n: numerator + * @d: denominator (must be a power of 2) + * + * Divides @n by @d and rounds up to next multiple of @d (which must be a power + * of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP(). + * Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP(). 
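+ *
+ * Example (illustrative)::
+ *
+ *	nr_pages = DIV_ROUND_UP_POW2(len, PAGE_SIZE);
+ *
+ * This stays correct for @n close to UINT_MAX, where the (n + d - 1) in
+ * __KERNEL_DIV_ROUND_UP() would wrap.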
+ */ +#define DIV_ROUND_UP_POW2(n, d) \ + ((n) / (d) + !!((n) & ((d) - 1))) + #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP #define DIV_ROUND_DOWN_ULL(ll, d) \ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index d836e7440ee8..138e2f1bd08f 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -671,6 +671,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) #define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */ #define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */ #define SG_MITER_FROM_SG (1 << 2) /* nop */ +#define SG_MITER_LOCAL (1 << 3) /* use kmap_local */ struct sg_mapping_iter { /* the following three fields can be accessed directly */ diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 63dd8cf3c3c2..d3561c4a080e 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -548,6 +548,12 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(raw_spinlock_bh, raw_spinlock_t, + raw_spin_lock_bh(_T->lock), + raw_spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(raw_spinlock_bh, _try, raw_spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, raw_spin_lock_irqsave(_T->lock, _T->flags), raw_spin_unlock_irqrestore(_T->lock, _T->flags), @@ -569,6 +575,13 @@ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try, spin_trylock_irq(_T->lock)) +DEFINE_LOCK_GUARD_1(spinlock_bh, spinlock_t, + spin_lock_bh(_T->lock), + spin_unlock_bh(_T->lock)) + +DEFINE_LOCK_GUARD_1_COND(spinlock_bh, _try, + spin_trylock_bh(_T->lock)) + DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, spin_lock_irqsave(_T->lock, _T->flags), spin_unlock_irqrestore(_T->lock, _T->flags), diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h index 8660a2a6d1fc..51401f01e2a5 100644 --- a/include/net/ipcomp.h +++ b/include/net/ipcomp.h @@ -3,20 +3,9 @@ #define _NET_IPCOMP_H #include <linux/skbuff.h> -#include <linux/types.h> - -#define IPCOMP_SCRATCH_SIZE 65400 - -struct crypto_comp; -struct ip_comp_hdr; - -struct ipcomp_data { - u16 threshold; - struct crypto_comp * __percpu *tfms; -}; struct ip_comp_hdr; -struct sk_buff; +struct netlink_ext_ack; struct xfrm_state; int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb); diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 832c15d9155b..eeb20dfb1fda 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -73,13 +73,20 @@ typedef enum { SEV_RET_INVALID_PARAM, SEV_RET_RESOURCE_LIMIT, SEV_RET_SECURE_DATA_INVALID, - SEV_RET_INVALID_KEY = 0x27, - SEV_RET_INVALID_PAGE_SIZE, - SEV_RET_INVALID_PAGE_STATE, - SEV_RET_INVALID_MDATA_ENTRY, - SEV_RET_INVALID_PAGE_OWNER, - SEV_RET_INVALID_PAGE_AEAD_OFLOW, - SEV_RET_RMP_INIT_REQUIRED, + SEV_RET_INVALID_PAGE_SIZE = 0x0019, + SEV_RET_INVALID_PAGE_STATE = 0x001A, + SEV_RET_INVALID_MDATA_ENTRY = 0x001B, + SEV_RET_INVALID_PAGE_OWNER = 0x001C, + SEV_RET_AEAD_OFLOW = 0x001D, + SEV_RET_EXIT_RING_BUFFER = 0x001F, + SEV_RET_RMP_INIT_REQUIRED = 0x0020, + SEV_RET_BAD_SVN = 0x0021, + SEV_RET_BAD_VERSION = 0x0022, + SEV_RET_SHUTDOWN_REQUIRED = 0x0023, + SEV_RET_UPDATE_FAILED = 0x0024, + SEV_RET_RESTORE_REQUIRED = 0x0025, + SEV_RET_RMP_INITIALIZATION_FAILED = 0x0026, + SEV_RET_INVALID_KEY = 0x0027, SEV_RET_MAX, } sev_ret_code; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 50ec26ea696b..23c0f4e6cb2f 100644 --- 
a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) "PM: hibernation: " fmt +#include <crypto/acompress.h> #include <linux/blkdev.h> #include <linux/export.h> #include <linux/suspend.h> @@ -757,7 +758,7 @@ int hibernate(void) */ if (!nocompress) { strscpy(hib_comp_algo, hibernate_compressor, sizeof(hib_comp_algo)); - if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) { + if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) { pr_err("%s compression is not available\n", hib_comp_algo); return -EOPNOTSUPP; } @@ -1008,7 +1009,7 @@ static int software_resume(void) strscpy(hib_comp_algo, COMPRESSION_ALGO_LZ4, sizeof(hib_comp_algo)); else strscpy(hib_comp_algo, COMPRESSION_ALGO_LZO, sizeof(hib_comp_algo)); - if (crypto_has_comp(hib_comp_algo, 0, 0) != 1) { + if (!crypto_has_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC)) { pr_err("%s compression is not available\n", hib_comp_algo); error = -EOPNOTSUPP; goto Unlock; diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 82b884b67152..80ff5f933a62 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) "PM: " fmt +#include <crypto/acompress.h> #include <linux/module.h> #include <linux/file.h> #include <linux/delay.h> @@ -635,7 +636,8 @@ static int crc32_threadfn(void *data) */ struct cmp_data { struct task_struct *thr; /* thread */ - struct crypto_comp *cc; /* crypto compressor stream */ + struct crypto_acomp *cc; /* crypto compressor */ + struct acomp_req *cr; /* crypto request */ atomic_t ready; /* ready to start flag */ atomic_t stop; /* ready to stop flag */ int ret; /* return code */ @@ -656,7 +658,6 @@ static atomic_t compressed_size = ATOMIC_INIT(0); static int compress_threadfn(void *data) { struct cmp_data *d = data; - unsigned int cmp_len = 0; while (1) { wait_event(d->go, atomic_read_acquire(&d->ready) || @@ -670,11 +671,13 @@ static int compress_threadfn(void *data) } atomic_set(&d->ready, 0); - cmp_len = CMP_SIZE - CMP_HEADER; - d->ret = crypto_comp_compress(d->cc, d->unc, d->unc_len, - d->cmp + CMP_HEADER, - &cmp_len); - d->cmp_len = cmp_len; + acomp_request_set_callback(d->cr, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + acomp_request_set_src_nondma(d->cr, d->unc, d->unc_len); + acomp_request_set_dst_nondma(d->cr, d->cmp + CMP_HEADER, + CMP_SIZE - CMP_HEADER); + d->ret = crypto_acomp_compress(d->cr); + d->cmp_len = d->cr->dlen; atomic_set(&compressed_size, atomic_read(&compressed_size) + d->cmp_len); atomic_set_release(&d->stop, 1); @@ -745,13 +748,20 @@ static int save_compressed_image(struct swap_map_handle *handle, init_waitqueue_head(&data[thr].go); init_waitqueue_head(&data[thr].done); - data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0); + data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC); if (IS_ERR_OR_NULL(data[thr].cc)) { pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc)); ret = -EFAULT; goto out_clean; } + data[thr].cr = acomp_request_alloc(data[thr].cc); + if (!data[thr].cr) { + pr_err("Could not allocate comp request\n"); + ret = -ENOMEM; + goto out_clean; + } + data[thr].thr = kthread_run(compress_threadfn, &data[thr], "image_compress/%u", thr); @@ -899,8 +909,8 @@ out_clean: for (thr = 0; thr < nr_threads; thr++) { if (data[thr].thr) kthread_stop(data[thr].thr); - if (data[thr].cc) - crypto_free_comp(data[thr].cc); + acomp_request_free(data[thr].cr); + crypto_free_acomp(data[thr].cc); } vfree(data); } @@ -1142,7 +1152,8 @@ static int load_image(struct swap_map_handle *handle, */ struct dec_data 
{ struct task_struct *thr; /* thread */ - struct crypto_comp *cc; /* crypto compressor stream */ + struct crypto_acomp *cc; /* crypto compressor */ + struct acomp_req *cr; /* crypto request */ atomic_t ready; /* ready to start flag */ atomic_t stop; /* ready to stop flag */ int ret; /* return code */ @@ -1160,7 +1171,6 @@ struct dec_data { static int decompress_threadfn(void *data) { struct dec_data *d = data; - unsigned int unc_len = 0; while (1) { wait_event(d->go, atomic_read_acquire(&d->ready) || @@ -1174,10 +1184,13 @@ static int decompress_threadfn(void *data) } atomic_set(&d->ready, 0); - unc_len = UNC_SIZE; - d->ret = crypto_comp_decompress(d->cc, d->cmp + CMP_HEADER, d->cmp_len, - d->unc, &unc_len); - d->unc_len = unc_len; + acomp_request_set_callback(d->cr, CRYPTO_TFM_REQ_MAY_SLEEP, + NULL, NULL); + acomp_request_set_src_nondma(d->cr, d->cmp + CMP_HEADER, + d->cmp_len); + acomp_request_set_dst_nondma(d->cr, d->unc, UNC_SIZE); + d->ret = crypto_acomp_decompress(d->cr); + d->unc_len = d->cr->dlen; if (clean_pages_on_decompress) flush_icache_range((unsigned long)d->unc, @@ -1254,13 +1267,20 @@ static int load_compressed_image(struct swap_map_handle *handle, init_waitqueue_head(&data[thr].go); init_waitqueue_head(&data[thr].done); - data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0); + data[thr].cc = crypto_alloc_acomp(hib_comp_algo, 0, CRYPTO_ALG_ASYNC); if (IS_ERR_OR_NULL(data[thr].cc)) { pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc)); ret = -EFAULT; goto out_clean; } + data[thr].cr = acomp_request_alloc(data[thr].cc); + if (!data[thr].cr) { + pr_err("Could not allocate comp request\n"); + ret = -ENOMEM; + goto out_clean; + } + data[thr].thr = kthread_run(decompress_threadfn, &data[thr], "image_decompress/%u", thr); @@ -1507,8 +1527,8 @@ out_clean: for (thr = 0; thr < nr_threads; thr++) { if (data[thr].thr) kthread_stop(data[thr].thr); - if (data[thr].cc) - crypto_free_comp(data[thr].cc); + acomp_request_free(data[thr].cr); + crypto_free_acomp(data[thr].cc); } vfree(data); } diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c index c02baa4168e1..055356508d97 100644 --- a/lib/842/842_compress.c +++ b/lib/842/842_compress.c @@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen, } if (repeat_count) { ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index b01253cac70a..798972b29b68 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -42,7 +42,7 @@ config CRYPTO_LIB_BLAKE2S_GENERIC of CRYPTO_LIB_BLAKE2S. config CRYPTO_ARCH_HAVE_LIB_CHACHA - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the ChaCha library interface, @@ -58,17 +58,21 @@ config CRYPTO_LIB_CHACHA_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_CHACHA. -config CRYPTO_LIB_CHACHA - tristate "ChaCha library interface" - depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA +config CRYPTO_LIB_CHACHA_INTERNAL + tristate select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n + +config CRYPTO_LIB_CHACHA + tristate + select CRYPTO + select CRYPTO_LIB_CHACHA_INTERNAL help Enable the ChaCha library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one is available and enabled. 
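The kernel/power/swap.c hunks above convert the hibernation compress/decompress threads from the legacy crypto_comp API to crypto_acomp, feeding the existing linear buffers through the new non-DMA request helpers. Below is a minimal sketch of that synchronous calling pattern, using only the calls visible in the diff; the function name and buffer handling are illustrative, not part of the patch::

    #include <crypto/acompress.h>
    #include <linux/err.h>

    /* Sketch: compress one linear buffer the way compress_threadfn() does. */
    static int sketch_compress(const char *alg, const u8 *src, unsigned int slen,
                               u8 *dst, unsigned int dlen)
    {
            struct crypto_acomp *tfm;
            struct acomp_req *req;
            int ret;

            /* CRYPTO_ALG_ASYNC in the mask requests a synchronous tfm. */
            tfm = crypto_alloc_acomp(alg, 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            req = acomp_request_alloc(tfm);
            if (!req) {
                    crypto_free_acomp(tfm);
                    return -ENOMEM;
            }

            /* No completion callback: the request runs synchronously. */
            acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
            acomp_request_set_src_nondma(req, src, slen);
            acomp_request_set_dst_nondma(req, dst, dlen);

            ret = crypto_acomp_compress(req);
            if (!ret)
                    ret = req->dlen;        /* bytes actually written to dst */

            acomp_request_free(req);
            crypto_free_acomp(tfm);
            return ret;
    }

The matching availability probe, as used in the hibernate.c hunks, is crypto_has_acomp(alg, 0, CRYPTO_ALG_ASYNC), which checks for a synchronous implementation without allocating a tfm.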
config CRYPTO_ARCH_HAVE_LIB_CURVE25519 - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the Curve25519 library interface, @@ -76,6 +80,7 @@ config CRYPTO_ARCH_HAVE_LIB_CURVE25519 config CRYPTO_LIB_CURVE25519_GENERIC tristate + select CRYPTO_LIB_UTILS help This symbol can be depended upon by arch implementations of the Curve25519 library interface that require the generic code as a @@ -83,11 +88,14 @@ config CRYPTO_LIB_CURVE25519_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_CURVE25519. -config CRYPTO_LIB_CURVE25519 - tristate "Curve25519 scalar multiplication library" - depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 +config CRYPTO_LIB_CURVE25519_INTERNAL + tristate select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n - select CRYPTO_LIB_UTILS + +config CRYPTO_LIB_CURVE25519 + tristate + select CRYPTO + select CRYPTO_LIB_CURVE25519_INTERNAL help Enable the Curve25519 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific @@ -104,7 +112,7 @@ config CRYPTO_LIB_POLY1305_RSIZE default 1 config CRYPTO_ARCH_HAVE_LIB_POLY1305 - tristate + bool help Declares whether the architecture provides an arch-specific accelerated implementation of the Poly1305 library interface, @@ -119,23 +127,24 @@ config CRYPTO_LIB_POLY1305_GENERIC implementation is enabled, this implementation serves the users of CRYPTO_LIB_POLY1305. -config CRYPTO_LIB_POLY1305 - tristate "Poly1305 library interface" - depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 +config CRYPTO_LIB_POLY1305_INTERNAL + tristate select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n + +config CRYPTO_LIB_POLY1305 + tristate + select CRYPTO + select CRYPTO_LIB_POLY1305_INTERNAL help Enable the Poly1305 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one is available and enabled. 
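With the CRYPTO_LIB_* symbols no longer user-selectable, a consumer now does `select CRYPTO_LIB_CURVE25519` in its own Kconfig entry and calls the library interface directly. A hedged sketch of that call sequence follows; the helper names come from <crypto/curve25519.h>, while the surrounding function is invented purely for illustration::

    #include <crypto/curve25519.h>

    /* Illustrative only: derive a shared secret from a peer's public key. */
    static bool sketch_x25519(u8 shared[CURVE25519_KEY_SIZE],
                              const u8 peer_public[CURVE25519_KEY_SIZE])
    {
            u8 secret[CURVE25519_KEY_SIZE];
            u8 mypublic[CURVE25519_KEY_SIZE];

            curve25519_generate_secret(secret);     /* random, clamped scalar */
            if (!curve25519_generate_public(mypublic, secret))
                    return false;                   /* rejects a weak result */

            /* Scalar multiplication; false signals an all-zero output. */
            return curve25519(shared, secret, peer_public);
    }

Whether the generic or an arch-accelerated implementation backs these calls is decided by the CRYPTO_ARCH_HAVE_LIB_* bool and the new *_INTERNAL symbols, invisibly to the caller.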
config CRYPTO_LIB_CHACHA20POLY1305 - tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)" - depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA - depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 - depends on CRYPTO + tristate select CRYPTO_LIB_CHACHA select CRYPTO_LIB_POLY1305 - select CRYPTO_ALGAPI + select CRYPTO_LIB_UTILS config CRYPTO_LIB_SHA1 tristate diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index a839c0ac60b2..9cfa886f1f89 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -7,11 +7,10 @@ * Information: https://tools.ietf.org/html/rfc8439 */ -#include <crypto/algapi.h> #include <crypto/chacha20poly1305.h> #include <crypto/chacha.h> #include <crypto/poly1305.h> -#include <crypto/scatterwalk.h> +#include <crypto/utils.h> #include <linux/unaligned.h> #include <linux/kernel.h> @@ -318,8 +317,8 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, if (unlikely(sl > -POLY1305_DIGEST_SIZE)) { poly1305_final(&poly1305_state, b.mac[1]); - scatterwalk_map_and_copy(b.mac[encrypt], src, src_len, - sizeof(b.mac[1]), encrypt); + sg_copy_buffer(src, sg_nents(src), b.mac[encrypt], + sizeof(b.mac[1]), src_len, !encrypt); ret = encrypt || !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE); } diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index 2f58fafbbddd..fc7b2b7ef4b2 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -lzo_compress-objs := lzo1x_compress.o +lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o lzo_decompress-objs := lzo1x_decompress_safe.o obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 47d6d43ea957..7b10ca86a893 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -18,11 +18,22 @@ #include <linux/lzo.h> #include "lzodefs.h" -static noinline size_t -lzo1x_1_do_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - size_t ti, void *wrkmem, signed char *state_offset, - const unsigned char bitstream_version) +#undef LZO_UNSAFE + +#ifndef LZO_SAFE +#define LZO_UNSAFE 1 +#define LZO_SAFE(name) name +#define HAVE_OP(x) 1 +#endif + +#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun + +static noinline int +LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len, + unsigned char **out, unsigned char *op_end, + size_t *tp, void *wrkmem, + signed char *state_offset, + const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; @@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, const unsigned char * const ip_end = in + in_len - 20; const unsigned char *ii; lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; + size_t ti = *tp; - op = out; + op = *out; ip = in; ii = ip; ip += ti < 4 ? 
4 - ti : 0; @@ -116,25 +128,32 @@ next: if (t != 0) { if (t <= 3) { op[*state_offset] |= t; + NEED_OP(4); COPY4(op, ii); op += t; } else if (t <= 16) { + NEED_OP(17); *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; } else { if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (unlikely(tt > 255)) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -151,6 +170,7 @@ next: if (unlikely(run_length)) { ip += run_length; run_length -= MIN_ZERO_RUN_LENGTH; + NEED_OP(4); put_unaligned_le32((run_length << 21) | 0xfffc18 | (run_length & 0x7), op); op += 4; @@ -243,10 +263,12 @@ m_len_done: ip += m_len; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; + NEED_OP(2); *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); } else if (m_off <= M3_MAX_OFFSET) { m_off -= 1; + NEED_OP(1); if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); else { @@ -254,14 +276,18 @@ m_len_done: *op++ = M3_MARKER | 0; while (unlikely(m_len > 255)) { m_len -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } else { m_off -= 0x4000; + NEED_OP(1); if (m_len <= M4_MAX_LEN) *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); @@ -282,11 +308,14 @@ m_len_done: m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { + NEED_OP(1); m_len -= 255; *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } @@ -295,14 +324,20 @@ finished_writing_instruction: ii = ip; goto next; } - *out_len = op - out; - return in_end - (ii - ti); + *out = op; + *tp = in_end - (ii - ti); + return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem, const unsigned char bitstream_version) +static int LZO_SAFE(lzogeneric1x_1_compress)( + const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem, const unsigned char bitstream_version) { + unsigned char * const op_end = out + *out_len; const unsigned char *ip = in; unsigned char *op = out; unsigned char *data_start; @@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, while (l > 20) { size_t ll = min_t(size_t, l, m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; + int err; + if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); - t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, - &state_offset, bitstream_version); + err = LZO_SAFE(lzo1x_1_do_compress)( + ip, ll, &op, op_end, &t, wrkmem, + &state_offset, bitstream_version); + if (err != LZO_E_OK) + return err; ip += ll; - op += *out_len; l -= ll; } t += l; @@ -342,20 +381,26 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, const unsigned char *ii = in + in_len - t; if (op == data_start && t <= 238) { + NEED_OP(1); *op++ = (17 + t); } else if (t <= 3) { op[state_offset] |= t; } else if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (tt > 255) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); if (t >= 16) do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -368,31 
+413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, } while (--t > 0); } + NEED_OP(3); *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0; *out_len = op - out; return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, 0); } -int lzorle1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, - wrkmem, LZO_VERSION); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, LZO_VERSION); } -EXPORT_SYMBOL_GPL(lzo1x_1_compress); -EXPORT_SYMBOL_GPL(lzorle1x_1_compress); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress)); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress)); +#ifndef LZO_UNSAFE MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); +#endif diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c new file mode 100644 index 000000000000..371c9f849492 --- /dev/null +++ b/lib/lzo/lzo1x_compress_safe.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * LZO1X Compressor from LZO + * + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com> + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for Linux kernel use by: + * Nitin Gupta <nitingupta910@gmail.com> + * Richard Purdie <rpurdie@openedhand.com> + */ + +#define LZO_SAFE(name) name##_safe +#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) + +#include "lzo1x_compress.c" diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 5bb6b8aff232..b58d5ef1a34b 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -879,7 +879,7 @@ EXPORT_SYMBOL(sg_miter_skip); * @miter->addr and @miter->length point to the current mapping. * * Context: - * May sleep if !SG_MITER_ATOMIC. + * May sleep if !SG_MITER_ATOMIC && !SG_MITER_LOCAL. * * Returns: * true if @miter contains the next mapping. 
false if end of sg @@ -901,6 +901,8 @@ bool sg_miter_next(struct sg_mapping_iter *miter) if (miter->__flags & SG_MITER_ATOMIC) miter->addr = kmap_atomic(miter->page) + miter->__offset; + else if (miter->__flags & SG_MITER_LOCAL) + miter->addr = kmap_local_page(miter->page) + miter->__offset; else miter->addr = kmap(miter->page) + miter->__offset; @@ -936,7 +938,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter) if (miter->__flags & SG_MITER_ATOMIC) { WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); - } else + } else if (miter->__flags & SG_MITER_LOCAL) + kunmap_local(miter->addr); + else kunmap(miter->page); miter->page = NULL; @@ -965,7 +969,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, { unsigned int offset = 0; struct sg_mapping_iter miter; - unsigned int sg_flags = SG_MITER_ATOMIC; + unsigned int sg_flags = SG_MITER_LOCAL; if (to_buffer) sg_flags |= SG_MITER_FROM_SG; @@ -1080,7 +1084,7 @@ size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents, { unsigned int offset = 0; struct sg_mapping_iter miter; - unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; + unsigned int sg_flags = SG_MITER_LOCAL | SG_MITER_TO_SG; sg_miter_start(&miter, sgl, nents, sg_flags); diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index f9e3d3d90dcf..03d508a45aae 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -37,17 +37,6 @@ #include "tls.h" -static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk) -{ - struct scatterlist *src = walk->sg; - int diff = walk->offset - src->offset; - - sg_set_page(sg, sg_page(src), - src->length - diff, walk->offset); - - scatterwalk_crypto_chain(sg, sg_next(src), 2); -} - static int tls_enc_record(struct aead_request *aead_req, struct crypto_aead *aead, char *aad, char *iv, __be64 rcd_sn, @@ -69,16 +58,13 @@ static int tls_enc_record(struct aead_request *aead_req, buf_size = TLS_HEADER_SIZE + cipher_desc->iv; len = min_t(int, *in_len, buf_size); - scatterwalk_copychunks(buf, in, len, 0); - scatterwalk_copychunks(buf, out, len, 1); + memcpy_from_scatterwalk(buf, in, len); + memcpy_to_scatterwalk(out, buf, len); *in_len -= len; if (!*in_len) return 0; - scatterwalk_pagedone(in, 0, 1); - scatterwalk_pagedone(out, 1, 1); - len = buf[4] | (buf[3] << 8); len -= cipher_desc->iv; @@ -90,8 +76,8 @@ static int tls_enc_record(struct aead_request *aead_req, sg_init_table(sg_out, ARRAY_SIZE(sg_out)); sg_set_buf(sg_in, aad, TLS_AAD_SPACE_SIZE); sg_set_buf(sg_out, aad, TLS_AAD_SPACE_SIZE); - chain_to_walk(sg_in + 1, in); - chain_to_walk(sg_out + 1, out); + scatterwalk_get_sglist(in, sg_in + 1); + scatterwalk_get_sglist(out, sg_out + 1); *in_len -= len; if (*in_len < 0) { @@ -110,10 +96,8 @@ static int tls_enc_record(struct aead_request *aead_req, } if (*in_len) { - scatterwalk_copychunks(NULL, in, len, 2); - scatterwalk_pagedone(in, 0, 1); - scatterwalk_copychunks(NULL, out, len, 2); - scatterwalk_pagedone(out, 1, 1); + scatterwalk_skip(in, len); + scatterwalk_skip(out, len); } len -= cipher_desc->tag; @@ -162,9 +146,6 @@ static int tls_enc_records(struct aead_request *aead_req, } while (rc == 0 && len); - scatterwalk_done(&in, 0, 0); - scatterwalk_done(&out, 1, 0); - return rc; } diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c index e6da7e8495c9..749011e031c0 100644 --- a/net/xfrm/xfrm_algo.c +++ b/net/xfrm/xfrm_algo.c @@ -5,13 +5,13 @@ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> */ +#include <crypto/acompress.h> 
#include <crypto/aead.h> #include <crypto/hash.h> #include <crypto/skcipher.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pfkeyv2.h> -#include <linux/crypto.h> #include <linux/scatterlist.h> #include <net/xfrm.h> #if IS_ENABLED(CONFIG_INET_ESP) || IS_ENABLED(CONFIG_INET6_ESP) @@ -669,7 +669,7 @@ static const struct xfrm_algo_list xfrm_ealg_list = { }; static const struct xfrm_algo_list xfrm_calg_list = { - .find = crypto_has_comp, + .find = crypto_has_acomp, .algs = calg_list, .entries = ARRAY_SIZE(calg_list), }; @@ -828,8 +828,7 @@ void xfrm_probe_algs(void) } for (i = 0; i < calg_entries(); i++) { - status = crypto_has_comp(calg_list[i].name, 0, - CRYPTO_ALG_ASYNC); + status = crypto_has_acomp(calg_list[i].name, 0, 0); if (calg_list[i].available != status) calg_list[i].available = status; } diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c index 9c0fa0e1786a..0c1420534394 100644 --- a/net/xfrm/xfrm_ipcomp.c +++ b/net/xfrm/xfrm_ipcomp.c @@ -3,7 +3,7 @@ * IP Payload Compression Protocol (IPComp) - RFC3173. * * Copyright (c) 2003 James Morris <jmorris@intercode.com.au> - * Copyright (c) 2003-2008 Herbert Xu <herbert@gondor.apana.org.au> + * Copyright (c) 2003-2025 Herbert Xu <herbert@gondor.apana.org.au> * * Todo: * - Tunable compression parameters. @@ -11,303 +11,302 @@ * - Adaptive compression. */ -#include <linux/crypto.h> +#include <crypto/acompress.h> #include <linux/err.h> -#include <linux/list.h> #include <linux/module.h> -#include <linux/mutex.h> -#include <linux/percpu.h> +#include <linux/skbuff_ref.h> #include <linux/slab.h> -#include <linux/smp.h> -#include <linux/vmalloc.h> -#include <net/ip.h> #include <net/ipcomp.h> #include <net/xfrm.h> -struct ipcomp_tfms { - struct list_head list; - struct crypto_comp * __percpu *tfms; - int users; +#define IPCOMP_SCRATCH_SIZE 65400 + +struct ipcomp_skb_cb { + struct xfrm_skb_cb xfrm; + struct acomp_req *req; }; -static DEFINE_MUTEX(ipcomp_resource_mutex); -static void * __percpu *ipcomp_scratches; -static int ipcomp_scratch_users; -static LIST_HEAD(ipcomp_tfms_list); +struct ipcomp_data { + u16 threshold; + struct crypto_acomp *tfm; +}; -static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) +struct ipcomp_req_extra { + struct xfrm_state *x; + struct scatterlist sg[]; +}; + +static inline struct ipcomp_skb_cb *ipcomp_cb(struct sk_buff *skb) { - struct ipcomp_data *ipcd = x->data; - const int plen = skb->len; - int dlen = IPCOMP_SCRATCH_SIZE; - const u8 *start = skb->data; - u8 *scratch = *this_cpu_ptr(ipcomp_scratches); - struct crypto_comp *tfm = *this_cpu_ptr(ipcd->tfms); - int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen); - int len; + struct ipcomp_skb_cb *cb = (void *)skb->cb; - if (err) - return err; + BUILD_BUG_ON(sizeof(*cb) > sizeof(skb->cb)); + return cb; +} - if (dlen < (plen + sizeof(struct ip_comp_hdr))) - return -EINVAL; +static int ipcomp_post_acomp(struct sk_buff *skb, int err, int hlen) +{ + struct acomp_req *req = ipcomp_cb(skb)->req; + struct ipcomp_req_extra *extra; + const int plen = skb->data_len; + struct scatterlist *dsg; + int len, dlen; - len = dlen - plen; - if (len > skb_tailroom(skb)) - len = skb_tailroom(skb); + if (unlikely(err)) + goto out_free_req; - __skb_put(skb, len); + extra = acomp_request_extra(req); + dsg = extra->sg; + dlen = req->dlen; - len += plen; - skb_copy_to_linear_data(skb, scratch, len); + pskb_trim_unique(skb, 0); + __skb_put(skb, hlen); - while ((scratch += len, dlen -= len) > 0) { + /* Only update truesize on input. 
*/ + if (!hlen) + skb->truesize += dlen - plen; + skb->data_len = dlen; + skb->len += dlen; + + do { skb_frag_t *frag; struct page *page; - if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) - return -EMSGSIZE; - frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags; - page = alloc_page(GFP_ATOMIC); - - if (!page) - return -ENOMEM; + page = sg_page(dsg); + dsg = sg_next(dsg); len = PAGE_SIZE; if (dlen < len) len = dlen; skb_frag_fill_page_desc(frag, page, 0, len); - memcpy(skb_frag_address(frag), scratch, len); - - skb->truesize += len; - skb->data_len += len; - skb->len += len; skb_shinfo(skb)->nr_frags++; - } + } while ((dlen -= len)); - return 0; + for (; dsg; dsg = sg_next(dsg)) + __free_page(sg_page(dsg)); + +out_free_req: + acomp_request_free(req); + return err; } -int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) +static int ipcomp_input_done2(struct sk_buff *skb, int err) { - int nexthdr; - int err = -ENOMEM; - struct ip_comp_hdr *ipch; - - if (skb_linearize_cow(skb)) - goto out; - - skb->ip_summed = CHECKSUM_NONE; + struct ip_comp_hdr *ipch = ip_comp_hdr(skb); + const int plen = skb->len; - /* Remove ipcomp header and decompress original payload */ - ipch = (void *)skb->data; - nexthdr = ipch->nexthdr; + skb_reset_transport_header(skb); - skb->transport_header = skb->network_header + sizeof(*ipch); - __skb_pull(skb, sizeof(*ipch)); - err = ipcomp_decompress(x, skb); - if (err) - goto out; + return ipcomp_post_acomp(skb, err, 0) ?: + skb->len < (plen + sizeof(ip_comp_hdr)) ? -EINVAL : + ipch->nexthdr; +} - err = nexthdr; +static void ipcomp_input_done(void *data, int err) +{ + struct sk_buff *skb = data; -out: - return err; + xfrm_input_resume(skb, ipcomp_input_done2(skb, err)); } -EXPORT_SYMBOL_GPL(ipcomp_input); -static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) +static struct acomp_req *ipcomp_setup_req(struct xfrm_state *x, + struct sk_buff *skb, int minhead, + int dlen) { + const int dnfrags = min(MAX_SKB_FRAGS, 16); struct ipcomp_data *ipcd = x->data; + struct ipcomp_req_extra *extra; + struct scatterlist *sg, *dsg; const int plen = skb->len; - int dlen = IPCOMP_SCRATCH_SIZE; - u8 *start = skb->data; - struct crypto_comp *tfm; - u8 *scratch; + struct crypto_acomp *tfm; + struct acomp_req *req; + int nfrags; + int total; int err; + int i; - local_bh_disable(); - scratch = *this_cpu_ptr(ipcomp_scratches); - tfm = *this_cpu_ptr(ipcd->tfms); - err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); - if (err) - goto out; - - if ((dlen + sizeof(struct ip_comp_hdr)) >= plen) { - err = -EMSGSIZE; - goto out; - } + ipcomp_cb(skb)->req = NULL; - memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen); - local_bh_enable(); + do { + struct sk_buff *trailer; - pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr)); - return 0; + if (skb->len > PAGE_SIZE) { + if (skb_linearize_cow(skb)) + return ERR_PTR(-ENOMEM); + nfrags = 1; + break; + } -out: - local_bh_enable(); - return err; -} + if (!skb_cloned(skb) && skb_headlen(skb) >= minhead) { + if (!skb_is_nonlinear(skb)) { + nfrags = 1; + break; + } else if (!skb_has_frag_list(skb)) { + nfrags = skb_shinfo(skb)->nr_frags; + nfrags++; + break; + } + } -int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) -{ - int err; - struct ip_comp_hdr *ipch; - struct ipcomp_data *ipcd = x->data; + nfrags = skb_cow_data(skb, skb_headlen(skb) < minhead ? 
+ minhead - skb_headlen(skb) : 0, + &trailer); + if (nfrags < 0) + return ERR_PTR(nfrags); + } while (0); + + tfm = ipcd->tfm; + req = acomp_request_alloc_extra( + tfm, sizeof(*extra) + sizeof(*sg) * (nfrags + dnfrags), + GFP_ATOMIC); + ipcomp_cb(skb)->req = req; + if (!req) + return ERR_PTR(-ENOMEM); + + extra = acomp_request_extra(req); + extra->x = x; + + dsg = extra->sg; + sg = dsg + dnfrags; + sg_init_table(sg, nfrags); + err = skb_to_sgvec(skb, sg, 0, plen); + if (unlikely(err < 0)) + return ERR_PTR(err); + + sg_init_table(dsg, dnfrags); + total = 0; + for (i = 0; i < dnfrags && total < dlen; i++) { + struct page *page; - if (skb->len < ipcd->threshold) { - /* Don't bother compressing */ - goto out_ok; + page = alloc_page(GFP_ATOMIC); + if (!page) + break; + sg_set_page(dsg + i, page, PAGE_SIZE, 0); + total += PAGE_SIZE; } + if (!i) + return ERR_PTR(-ENOMEM); + sg_mark_end(dsg + i - 1); + dlen = min(dlen, total); - if (skb_linearize_cow(skb)) - goto out_ok; - - err = ipcomp_compress(x, skb); - - if (err) { - goto out_ok; - } + acomp_request_set_params(req, sg, dsg, plen, dlen); - /* Install ipcomp header, convert into ipcomp datagram. */ - ipch = ip_comp_hdr(skb); - ipch->nexthdr = *skb_mac_header(skb); - ipch->flags = 0; - ipch->cpi = htons((u16 )ntohl(x->id.spi)); - *skb_mac_header(skb) = IPPROTO_COMP; -out_ok: - skb_push(skb, -skb_network_offset(skb)); - return 0; + return req; } -EXPORT_SYMBOL_GPL(ipcomp_output); -static void ipcomp_free_scratches(void) +static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb) { - int i; - void * __percpu *scratches; - - if (--ipcomp_scratch_users) - return; + struct acomp_req *req; + int err; - scratches = ipcomp_scratches; - if (!scratches) - return; + req = ipcomp_setup_req(x, skb, 0, IPCOMP_SCRATCH_SIZE); + err = PTR_ERR(req); + if (IS_ERR(req)) + goto out; - for_each_possible_cpu(i) - vfree(*per_cpu_ptr(scratches, i)); + acomp_request_set_callback(req, 0, ipcomp_input_done, skb); + err = crypto_acomp_decompress(req); + if (err == -EINPROGRESS) + return err; - free_percpu(scratches); - ipcomp_scratches = NULL; +out: + return ipcomp_input_done2(skb, err); } -static void * __percpu *ipcomp_alloc_scratches(void) +int ipcomp_input(struct xfrm_state *x, struct sk_buff *skb) { - void * __percpu *scratches; - int i; - - if (ipcomp_scratch_users++) - return ipcomp_scratches; - - scratches = alloc_percpu(void *); - if (!scratches) - return NULL; + struct ip_comp_hdr *ipch __maybe_unused; - ipcomp_scratches = scratches; + if (!pskb_may_pull(skb, sizeof(*ipch))) + return -EINVAL; - for_each_possible_cpu(i) { - void *scratch; + skb->ip_summed = CHECKSUM_NONE; - scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i)); - if (!scratch) - return NULL; - *per_cpu_ptr(scratches, i) = scratch; - } + /* Remove ipcomp header and decompress original payload */ + __skb_pull(skb, sizeof(*ipch)); - return scratches; + return ipcomp_decompress(x, skb); } +EXPORT_SYMBOL_GPL(ipcomp_input); -static void ipcomp_free_tfms(struct crypto_comp * __percpu *tfms) +static int ipcomp_output_push(struct sk_buff *skb) { - struct ipcomp_tfms *pos; - int cpu; - - list_for_each_entry(pos, &ipcomp_tfms_list, list) { - if (pos->tfms == tfms) - break; - } - - WARN_ON(list_entry_is_head(pos, &ipcomp_tfms_list, list)); - - if (--pos->users) - return; + skb_push(skb, -skb_network_offset(skb)); + return 0; +} - list_del(&pos->list); - kfree(pos); +static int ipcomp_output_done2(struct xfrm_state *x, struct sk_buff *skb, + int err) +{ + struct ip_comp_hdr *ipch; - if 
(!tfms) - return; + err = ipcomp_post_acomp(skb, err, sizeof(*ipch)); + if (err) + goto out_ok; - for_each_possible_cpu(cpu) { - struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); - crypto_free_comp(tfm); - } - free_percpu(tfms); + /* Install ipcomp header, convert into ipcomp datagram. */ + ipch = ip_comp_hdr(skb); + ipch->nexthdr = *skb_mac_header(skb); + ipch->flags = 0; + ipch->cpi = htons((u16 )ntohl(x->id.spi)); + *skb_mac_header(skb) = IPPROTO_COMP; +out_ok: + return ipcomp_output_push(skb); } -static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name) +static void ipcomp_output_done(void *data, int err) { - struct ipcomp_tfms *pos; - struct crypto_comp * __percpu *tfms; - int cpu; + struct ipcomp_req_extra *extra; + struct sk_buff *skb = data; + struct acomp_req *req; + req = ipcomp_cb(skb)->req; + extra = acomp_request_extra(req); - list_for_each_entry(pos, &ipcomp_tfms_list, list) { - struct crypto_comp *tfm; + xfrm_output_resume(skb_to_full_sk(skb), skb, + ipcomp_output_done2(extra->x, skb, err)); +} - /* This can be any valid CPU ID so we don't need locking. */ - tfm = this_cpu_read(*pos->tfms); +static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) +{ + struct ip_comp_hdr *ipch __maybe_unused; + struct acomp_req *req; + int err; - if (!strcmp(crypto_comp_name(tfm), alg_name)) { - pos->users++; - return pos->tfms; - } - } + req = ipcomp_setup_req(x, skb, sizeof(*ipch), + skb->len - sizeof(*ipch)); + err = PTR_ERR(req); + if (IS_ERR(req)) + goto out; - pos = kmalloc(sizeof(*pos), GFP_KERNEL); - if (!pos) - return NULL; + acomp_request_set_callback(req, 0, ipcomp_output_done, skb); + err = crypto_acomp_compress(req); + if (err == -EINPROGRESS) + return err; - pos->users = 1; - INIT_LIST_HEAD(&pos->list); - list_add(&pos->list, &ipcomp_tfms_list); +out: + return ipcomp_output_done2(x, skb, err); +} - pos->tfms = tfms = alloc_percpu(struct crypto_comp *); - if (!tfms) - goto error; +int ipcomp_output(struct xfrm_state *x, struct sk_buff *skb) +{ + struct ipcomp_data *ipcd = x->data; - for_each_possible_cpu(cpu) { - struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - goto error; - *per_cpu_ptr(tfms, cpu) = tfm; + if (skb->len < ipcd->threshold) { + /* Don't bother compressing */ + return ipcomp_output_push(skb); } - return tfms; - -error: - ipcomp_free_tfms(tfms); - return NULL; + return ipcomp_compress(x, skb); } +EXPORT_SYMBOL_GPL(ipcomp_output); static void ipcomp_free_data(struct ipcomp_data *ipcd) { - if (ipcd->tfms) - ipcomp_free_tfms(ipcd->tfms); - ipcomp_free_scratches(); + crypto_free_acomp(ipcd->tfm); } void ipcomp_destroy(struct xfrm_state *x) @@ -316,9 +315,7 @@ void ipcomp_destroy(struct xfrm_state *x) if (!ipcd) return; xfrm_state_delete_tunnel(x); - mutex_lock(&ipcomp_resource_mutex); ipcomp_free_data(ipcd); - mutex_unlock(&ipcomp_resource_mutex); kfree(ipcd); } EXPORT_SYMBOL_GPL(ipcomp_destroy); @@ -345,14 +342,9 @@ int ipcomp_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) if (!ipcd) goto out; - mutex_lock(&ipcomp_resource_mutex); - if (!ipcomp_alloc_scratches()) - goto error; - - ipcd->tfms = ipcomp_alloc_tfms(x->calg->alg_name); - if (!ipcd->tfms) + ipcd->tfm = crypto_alloc_acomp(x->calg->alg_name, 0, 0); + if (IS_ERR(ipcd->tfm)) goto error; - mutex_unlock(&ipcomp_resource_mutex); calg_desc = xfrm_calg_get_byname(x->calg->alg_name, 0); BUG_ON(!calg_desc); @@ -364,7 +356,6 @@ out: error: ipcomp_free_data(ipcd); - mutex_unlock(&ipcomp_resource_mutex); kfree(ipcd); goto 
out; } diff --git a/security/keys/Kconfig b/security/keys/Kconfig index abb03a1b2a5c..d4f5fc1e7263 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig @@ -60,7 +60,7 @@ config BIG_KEYS bool "Large payload keys" depends on KEYS depends on TMPFS - depends on CRYPTO_LIB_CHACHA20POLY1305 = y + select CRYPTO_LIB_CHACHA20POLY1305 help This option provides support for holding large keys within the kernel (for example Kerberos ticket caches). The data may be stored out to diff --git a/tools/testing/crypto/chacha20-s390/test-cipher.c b/tools/testing/crypto/chacha20-s390/test-cipher.c index 8141d45df51a..35ea65c54ffa 100644 --- a/tools/testing/crypto/chacha20-s390/test-cipher.c +++ b/tools/testing/crypto/chacha20-s390/test-cipher.c @@ -66,7 +66,7 @@ static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain) } /* Encrypt */ - chacha_init_arch(chacha_state, (u32*)key, iv); + chacha_init(chacha_state, (u32 *)key, iv); start = ktime_get_ns(); chacha_crypt_arch(chacha_state, cipher, plain, data_size, 20); @@ -81,7 +81,7 @@ static int test_lib_chacha(u8 *revert, u8 *cipher, u8 *plain) pr_info("lib encryption took: %lld nsec", end - start); /* Decrypt */ - chacha_init_arch(chacha_state, (u32 *)key, iv); + chacha_init(chacha_state, (u32 *)key, iv); start = ktime_get_ns(); chacha_crypt_arch(chacha_state, revert, cipher, data_size, 20);
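The final hunks switch the s390 test module from the removed chacha_init_arch() to the unified chacha_init() entry point of <crypto/chacha.h>. For reference, a minimal sketch of the library calling sequence the test exercises; buffer names are illustrative, and chacha_crypt() is the portable counterpart of the chacha_crypt_arch() call the test keeps::

    #include <crypto/chacha.h>

    /* Illustrative: one-shot ChaCha20 encryption of a linear buffer. */
    static void sketch_chacha20(u8 *dst, const u8 *src, unsigned int len,
                                const u32 key[CHACHA_KEY_SIZE / sizeof(u32)],
                                const u8 iv[CHACHA_IV_SIZE])
    {
            u32 state[CHACHA_STATE_WORDS];

            chacha_init(state, key, iv);            /* constants, key, counter/nonce */
            chacha_crypt(state, dst, src, len, 20); /* 20 rounds = ChaCha20 */
    }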