Diffstat (limited to 'arch/mips')
40 files changed, 323 insertions, 931 deletions
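A recurring pattern in the hunks below is the conversion of MIPS interrupt controllers from the devicetree-specific irq_domain_add_linear()/irq_domain_add_tree() helpers to the fwnode-based irq_domain_create_*() variants. As a point of reference only, here is a minimal sketch of the before/after call shape; my_intc_init() and my_domain_ops are hypothetical and not part of this series:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical example showing only the call-shape change:
 * irq_domain_create_linear() takes a struct fwnode_handle *, so an OF
 * node is wrapped with of_fwnode_handle(); passing NULL directly (as the
 * ath25 hunks do) also works, since of_fwnode_handle(NULL) is NULL. */
static const struct irq_domain_ops my_domain_ops;

static int __init my_intc_init(struct device_node *node, unsigned int nr_irqs)
{
	struct irq_domain *domain;

	/* old: domain = irq_domain_add_linear(node, nr_irqs,
	 *                                     &my_domain_ops, NULL); */
	domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
					  &my_domain_ops, NULL);
	if (!domain)
		return -ENOMEM;

	return 0;
}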
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c index 8ccf167c167e..e8c38aaf46a2 100644 --- a/arch/mips/ath25/ar2315.c +++ b/arch/mips/ath25/ar2315.c @@ -149,8 +149,8 @@ void __init ar2315_arch_init_irq(void) ath25_irq_dispatch = ar2315_irq_dispatch; - domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT, - &ar2315_misc_irq_domain_ops, NULL); + domain = irq_domain_create_linear(NULL, AR2315_MISC_IRQ_COUNT, + &ar2315_misc_irq_domain_ops, NULL); if (!domain) panic("Failed to add IRQ domain"); diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c index cfa103518113..4a1d874be766 100644 --- a/arch/mips/ath25/ar5312.c +++ b/arch/mips/ath25/ar5312.c @@ -143,8 +143,8 @@ void __init ar5312_arch_init_irq(void) ath25_irq_dispatch = ar5312_irq_dispatch; - domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT, - &ar5312_misc_irq_domain_ops, NULL); + domain = irq_domain_create_linear(NULL, AR5312_MISC_IRQ_COUNT, + &ar5312_misc_irq_domain_ops, NULL); if (!domain) panic("Failed to add IRQ domain"); diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 247be207f293..de426a474b5b 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -282,7 +282,7 @@ static int __init bcm47xx_register_bus_complete(void) bcm47xx_leds_register(); bcm47xx_workarounds(); - fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status); + fixed_phy_add(0, &bcm47xx_fixed_phy_status); return 0; } device_initcall(bcm47xx_register_bus_complete); diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig index 450e979ef5d9..11f4aa6e80e9 100644 --- a/arch/mips/cavium-octeon/Kconfig +++ b/arch/mips/cavium-octeon/Kconfig @@ -23,6 +23,12 @@ config CAVIUM_OCTEON_CVMSEG_SIZE legally range is from zero to 54 cache blocks (i.e. CVMSEG LM is between zero and 6192 bytes). +config CRYPTO_SHA256_OCTEON + tristate + default CRYPTO_LIB_SHA256 + select CRYPTO_ARCH_HAVE_LIB_SHA256 + select CRYPTO_LIB_SHA256_GENERIC + endif # CPU_CAVIUM_OCTEON if CAVIUM_OCTEON_SOC diff --git a/arch/mips/cavium-octeon/crypto/octeon-md5.c b/arch/mips/cavium-octeon/crypto/octeon-md5.c index 5ee4ade99b99..fbc84eb7fedf 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-md5.c +++ b/arch/mips/cavium-octeon/crypto/octeon-md5.c @@ -19,22 +19,26 @@ * any later version. */ +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> #include <crypto/md5.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> -#include <asm/byteorder.h> -#include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> +#include <linux/unaligned.h> #include "octeon-crypto.h" +struct octeon_md5_state { + __le32 hash[MD5_HASH_WORDS]; + u64 byte_count; +}; + /* * We pass everything as 64-bit. OCTEON can handle misaligned data. 
*/ -static void octeon_md5_store_hash(struct md5_state *ctx) +static void octeon_md5_store_hash(struct octeon_md5_state *ctx) { u64 *hash = (u64 *)ctx->hash; @@ -42,7 +46,7 @@ static void octeon_md5_store_hash(struct md5_state *ctx) write_octeon_64bit_hash_dword(hash[1], 1); } -static void octeon_md5_read_hash(struct md5_state *ctx) +static void octeon_md5_read_hash(struct octeon_md5_state *ctx) { u64 *hash = (u64 *)ctx->hash; @@ -66,13 +70,12 @@ static void octeon_md5_transform(const void *_block) static int octeon_md5_init(struct shash_desc *desc) { - struct md5_state *mctx = shash_desc_ctx(desc); + struct octeon_md5_state *mctx = shash_desc_ctx(desc); - mctx->hash[0] = MD5_H0; - mctx->hash[1] = MD5_H1; - mctx->hash[2] = MD5_H2; - mctx->hash[3] = MD5_H3; - cpu_to_le32_array(mctx->hash, 4); + mctx->hash[0] = cpu_to_le32(MD5_H0); + mctx->hash[1] = cpu_to_le32(MD5_H1); + mctx->hash[2] = cpu_to_le32(MD5_H2); + mctx->hash[3] = cpu_to_le32(MD5_H3); mctx->byte_count = 0; return 0; @@ -81,52 +84,38 @@ static int octeon_md5_init(struct shash_desc *desc) static int octeon_md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - struct md5_state *mctx = shash_desc_ctx(desc); - const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); + struct octeon_md5_state *mctx = shash_desc_ctx(desc); struct octeon_cop2_state state; unsigned long flags; mctx->byte_count += len; - - if (avail > len) { - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), - data, len); - return 0; - } - - memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, - avail); - flags = octeon_crypto_enable(&state); octeon_md5_store_hash(mctx); - octeon_md5_transform(mctx->block); - data += avail; - len -= avail; - - while (len >= sizeof(mctx->block)) { + do { octeon_md5_transform(data); - data += sizeof(mctx->block); - len -= sizeof(mctx->block); - } + data += MD5_HMAC_BLOCK_SIZE; + len -= MD5_HMAC_BLOCK_SIZE; + } while (len >= MD5_HMAC_BLOCK_SIZE); octeon_md5_read_hash(mctx); octeon_crypto_disable(&state, flags); - - memcpy(mctx->block, data, len); - - return 0; + mctx->byte_count -= len; + return len; } -static int octeon_md5_final(struct shash_desc *desc, u8 *out) +static int octeon_md5_finup(struct shash_desc *desc, const u8 *src, + unsigned int offset, u8 *out) { - struct md5_state *mctx = shash_desc_ctx(desc); - const unsigned int offset = mctx->byte_count & 0x3f; - char *p = (char *)mctx->block + offset; + struct octeon_md5_state *mctx = shash_desc_ctx(desc); int padding = 56 - (offset + 1); struct octeon_cop2_state state; + u32 block[MD5_BLOCK_WORDS]; unsigned long flags; + char *p; + p = memcpy(block, src, offset); + p += offset; *p++ = 0x80; flags = octeon_crypto_enable(&state); @@ -134,39 +123,56 @@ static int octeon_md5_final(struct shash_desc *desc, u8 *out) if (padding < 0) { memset(p, 0x00, padding + sizeof(u64)); - octeon_md5_transform(mctx->block); - p = (char *)mctx->block; + octeon_md5_transform(block); + p = (char *)block; padding = 56; } memset(p, 0, padding); - mctx->block[14] = mctx->byte_count << 3; - mctx->block[15] = mctx->byte_count >> 29; - cpu_to_le32_array(mctx->block + 14, 2); - octeon_md5_transform(mctx->block); + mctx->byte_count += offset; + block[14] = mctx->byte_count << 3; + block[15] = mctx->byte_count >> 29; + cpu_to_le32_array(block + 14, 2); + octeon_md5_transform(block); octeon_md5_read_hash(mctx); octeon_crypto_disable(&state, flags); + memzero_explicit(block, sizeof(block)); memcpy(out, mctx->hash, sizeof(mctx->hash)); - memset(mctx, 0, 
sizeof(*mctx)); return 0; } static int octeon_md5_export(struct shash_desc *desc, void *out) { - struct md5_state *ctx = shash_desc_ctx(desc); - - memcpy(out, ctx, sizeof(*ctx)); + struct octeon_md5_state *ctx = shash_desc_ctx(desc); + union { + u8 *u8; + u32 *u32; + u64 *u64; + } p = { .u8 = out }; + int i; + + for (i = 0; i < MD5_HASH_WORDS; i++) + put_unaligned(le32_to_cpu(ctx->hash[i]), p.u32++); + put_unaligned(ctx->byte_count, p.u64); return 0; } static int octeon_md5_import(struct shash_desc *desc, const void *in) { - struct md5_state *ctx = shash_desc_ctx(desc); - - memcpy(ctx, in, sizeof(*ctx)); + struct octeon_md5_state *ctx = shash_desc_ctx(desc); + union { + const u8 *u8; + const u32 *u32; + const u64 *u64; + } p = { .u8 = in }; + int i; + + for (i = 0; i < MD5_HASH_WORDS; i++) + ctx->hash[i] = cpu_to_le32(get_unaligned(p.u32++)); + ctx->byte_count = get_unaligned(p.u64); return 0; } @@ -174,15 +180,16 @@ static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = octeon_md5_init, .update = octeon_md5_update, - .final = octeon_md5_final, + .finup = octeon_md5_finup, .export = octeon_md5_export, .import = octeon_md5_import, - .descsize = sizeof(struct md5_state), - .statesize = sizeof(struct md5_state), + .statesize = MD5_STATE_SIZE, + .descsize = sizeof(struct octeon_md5_state), .base = { .cra_name = "md5", .cra_driver_name= "octeon-md5", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha1.c b/arch/mips/cavium-octeon/crypto/octeon-sha1.c index 37a07b3c4568..e70f21a473da 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-sha1.c +++ b/arch/mips/cavium-octeon/crypto/octeon-sha1.c @@ -13,15 +13,13 @@ * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> */ -#include <linux/mm.h> +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> #include <crypto/sha1.h> #include <crypto/sha1_base.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/byteorder.h> -#include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> #include "octeon-crypto.h" @@ -58,49 +56,23 @@ static void octeon_sha1_read_hash(struct sha1_state *sctx) memzero_explicit(&hash_tail.dword, sizeof(hash_tail.dword)); } -static void octeon_sha1_transform(const void *_block) +static void octeon_sha1_transform(struct sha1_state *sctx, const u8 *src, + int blocks) { - const u64 *block = _block; - - write_octeon_64bit_block_dword(block[0], 0); - write_octeon_64bit_block_dword(block[1], 1); - write_octeon_64bit_block_dword(block[2], 2); - write_octeon_64bit_block_dword(block[3], 3); - write_octeon_64bit_block_dword(block[4], 4); - write_octeon_64bit_block_dword(block[5], 5); - write_octeon_64bit_block_dword(block[6], 6); - octeon_sha1_start(block[7]); -} - -static void __octeon_sha1_update(struct sha1_state *sctx, const u8 *data, - unsigned int len) -{ - unsigned int partial; - unsigned int done; - const u8 *src; - - partial = sctx->count % SHA1_BLOCK_SIZE; - sctx->count += len; - done = 0; - src = data; - - if ((partial + len) >= SHA1_BLOCK_SIZE) { - if (partial) { - done = -partial; - memcpy(sctx->buffer + partial, data, - done + SHA1_BLOCK_SIZE); - src = sctx->buffer; - } - - do { - octeon_sha1_transform(src); - done += SHA1_BLOCK_SIZE; - src = data + done; - } while (done + SHA1_BLOCK_SIZE <= len); - - partial = 0; - } - memcpy(sctx->buffer + partial, 
src, len - done); + do { + const u64 *block = (const u64 *)src; + + write_octeon_64bit_block_dword(block[0], 0); + write_octeon_64bit_block_dword(block[1], 1); + write_octeon_64bit_block_dword(block[2], 2); + write_octeon_64bit_block_dword(block[3], 3); + write_octeon_64bit_block_dword(block[4], 4); + write_octeon_64bit_block_dword(block[5], 5); + write_octeon_64bit_block_dword(block[6], 6); + octeon_sha1_start(block[7]); + + src += SHA1_BLOCK_SIZE; + } while (--blocks); } static int octeon_sha1_update(struct shash_desc *desc, const u8 *data, @@ -109,95 +81,47 @@ static int octeon_sha1_update(struct shash_desc *desc, const u8 *data, struct sha1_state *sctx = shash_desc_ctx(desc); struct octeon_cop2_state state; unsigned long flags; - - /* - * Small updates never reach the crypto engine, so the generic sha1 is - * faster because of the heavyweight octeon_crypto_enable() / - * octeon_crypto_disable(). - */ - if ((sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE) - return crypto_sha1_update(desc, data, len); + int remain; flags = octeon_crypto_enable(&state); octeon_sha1_store_hash(sctx); - __octeon_sha1_update(sctx, data, len); + remain = sha1_base_do_update_blocks(desc, data, len, + octeon_sha1_transform); octeon_sha1_read_hash(sctx); octeon_crypto_disable(&state, flags); - - return 0; + return remain; } -static int octeon_sha1_final(struct shash_desc *desc, u8 *out) +static int octeon_sha1_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *out) { struct sha1_state *sctx = shash_desc_ctx(desc); - static const u8 padding[64] = { 0x80, }; struct octeon_cop2_state state; - __be32 *dst = (__be32 *)out; - unsigned int pad_len; unsigned long flags; - unsigned int index; - __be64 bits; - int i; - - /* Save number of bits. */ - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64. */ - index = sctx->count & 0x3f; - pad_len = (index < 56) ? (56 - index) : ((64+56) - index); flags = octeon_crypto_enable(&state); octeon_sha1_store_hash(sctx); - __octeon_sha1_update(sctx, padding, pad_len); - - /* Append length (before padding). */ - __octeon_sha1_update(sctx, (const u8 *)&bits, sizeof(bits)); + sha1_base_do_finup(desc, src, len, octeon_sha1_transform); octeon_sha1_read_hash(sctx); octeon_crypto_disable(&state, flags); - - /* Store state in digest */ - for (i = 0; i < 5; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Zeroize sensitive information. 
*/ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int octeon_sha1_export(struct shash_desc *desc, void *out) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int octeon_sha1_import(struct shash_desc *desc, const void *in) -{ - struct sha1_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; + return sha1_base_finish(desc, out); } static struct shash_alg octeon_sha1_alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_base_init, .update = octeon_sha1_update, - .final = octeon_sha1_final, - .export = octeon_sha1_export, - .import = octeon_sha1_import, - .descsize = sizeof(struct sha1_state), - .statesize = sizeof(struct sha1_state), + .finup = octeon_sha1_finup, + .descsize = SHA1_STATE_SIZE, .base = { .cra_name = "sha1", .cra_driver_name= "octeon-sha1", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha256.c b/arch/mips/cavium-octeon/crypto/octeon-sha256.c index 435e4a6e7f13..f93faaf1f4af 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-sha256.c +++ b/arch/mips/cavium-octeon/crypto/octeon-sha256.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * Cryptographic API. - * - * SHA-224 and SHA-256 Secure Hash Algorithm. + * SHA-256 Secure Hash Algorithm. * * Adapted for OCTEON by Aaro Koskinen <aaro.koskinen@iki.fi>. * @@ -14,15 +12,10 @@ * SHA224 Support Copyright 2007 Intel Corporation <jonathan.lynch@intel.com> */ -#include <linux/mm.h> -#include <crypto/sha2.h> -#include <crypto/sha256_base.h> -#include <linux/init.h> -#include <linux/types.h> -#include <linux/module.h> -#include <asm/byteorder.h> #include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> #include "octeon-crypto.h" @@ -30,212 +23,51 @@ * We pass everything as 64-bit. OCTEON can handle misaligned data. 
*/ -static void octeon_sha256_store_hash(struct sha256_state *sctx) -{ - u64 *hash = (u64 *)sctx->state; - - write_octeon_64bit_hash_dword(hash[0], 0); - write_octeon_64bit_hash_dword(hash[1], 1); - write_octeon_64bit_hash_dword(hash[2], 2); - write_octeon_64bit_hash_dword(hash[3], 3); -} - -static void octeon_sha256_read_hash(struct sha256_state *sctx) -{ - u64 *hash = (u64 *)sctx->state; - - hash[0] = read_octeon_64bit_hash_dword(0); - hash[1] = read_octeon_64bit_hash_dword(1); - hash[2] = read_octeon_64bit_hash_dword(2); - hash[3] = read_octeon_64bit_hash_dword(3); -} - -static void octeon_sha256_transform(const void *_block) -{ - const u64 *block = _block; - - write_octeon_64bit_block_dword(block[0], 0); - write_octeon_64bit_block_dword(block[1], 1); - write_octeon_64bit_block_dword(block[2], 2); - write_octeon_64bit_block_dword(block[3], 3); - write_octeon_64bit_block_dword(block[4], 4); - write_octeon_64bit_block_dword(block[5], 5); - write_octeon_64bit_block_dword(block[6], 6); - octeon_sha256_start(block[7]); -} - -static void __octeon_sha256_update(struct sha256_state *sctx, const u8 *data, - unsigned int len) -{ - unsigned int partial; - unsigned int done; - const u8 *src; - - partial = sctx->count % SHA256_BLOCK_SIZE; - sctx->count += len; - done = 0; - src = data; - - if ((partial + len) >= SHA256_BLOCK_SIZE) { - if (partial) { - done = -partial; - memcpy(sctx->buf + partial, data, - done + SHA256_BLOCK_SIZE); - src = sctx->buf; - } - - do { - octeon_sha256_transform(src); - done += SHA256_BLOCK_SIZE; - src = data + done; - } while (done + SHA256_BLOCK_SIZE <= len); - - partial = 0; - } - memcpy(sctx->buf + partial, src, len - done); -} - -static int octeon_sha256_update(struct shash_desc *desc, const u8 *data, - unsigned int len) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - struct octeon_cop2_state state; - unsigned long flags; - - /* - * Small updates never reach the crypto engine, so the generic sha256 is - * faster because of the heavyweight octeon_crypto_enable() / - * octeon_crypto_disable(). - */ - if ((sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE) - return crypto_sha256_update(desc, data, len); - - flags = octeon_crypto_enable(&state); - octeon_sha256_store_hash(sctx); - - __octeon_sha256_update(sctx, data, len); - - octeon_sha256_read_hash(sctx); - octeon_crypto_disable(&state, flags); - - return 0; -} - -static int octeon_sha256_final(struct shash_desc *desc, u8 *out) +void sha256_blocks_arch(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) { - struct sha256_state *sctx = shash_desc_ctx(desc); - static const u8 padding[64] = { 0x80, }; - struct octeon_cop2_state state; - __be32 *dst = (__be32 *)out; - unsigned int pad_len; + struct octeon_cop2_state cop2_state; + u64 *state64 = (u64 *)state; unsigned long flags; - unsigned int index; - __be64 bits; - int i; - - /* Save number of bits. */ - bits = cpu_to_be64(sctx->count << 3); - - /* Pad out to 56 mod 64. */ - index = sctx->count & 0x3f; - pad_len = (index < 56) ? (56 - index) : ((64+56) - index); - - flags = octeon_crypto_enable(&state); - octeon_sha256_store_hash(sctx); - - __octeon_sha256_update(sctx, padding, pad_len); - - /* Append length (before padding). */ - __octeon_sha256_update(sctx, (const u8 *)&bits, sizeof(bits)); - - octeon_sha256_read_hash(sctx); - octeon_crypto_disable(&state, flags); - - /* Store state in digest */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be32(sctx->state[i]); - - /* Zeroize sensitive information. 
*/ - memset(sctx, 0, sizeof(*sctx)); - - return 0; -} - -static int octeon_sha224_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[SHA256_DIGEST_SIZE]; - - octeon_sha256_final(desc, D); - memcpy(hash, D, SHA224_DIGEST_SIZE); - memzero_explicit(D, SHA256_DIGEST_SIZE); - - return 0; -} - -static int octeon_sha256_export(struct shash_desc *desc, void *out) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(out, sctx, sizeof(*sctx)); - return 0; -} - -static int octeon_sha256_import(struct shash_desc *desc, const void *in) -{ - struct sha256_state *sctx = shash_desc_ctx(desc); - - memcpy(sctx, in, sizeof(*sctx)); - return 0; -} - -static struct shash_alg octeon_sha256_algs[2] = { { - .digestsize = SHA256_DIGEST_SIZE, - .init = sha256_base_init, - .update = octeon_sha256_update, - .final = octeon_sha256_final, - .export = octeon_sha256_export, - .import = octeon_sha256_import, - .descsize = sizeof(struct sha256_state), - .statesize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha256", - .cra_driver_name= "octeon-sha256", - .cra_priority = OCTEON_CR_OPCODE_PRIORITY, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -}, { - .digestsize = SHA224_DIGEST_SIZE, - .init = sha224_base_init, - .update = octeon_sha256_update, - .final = octeon_sha224_final, - .descsize = sizeof(struct sha256_state), - .base = { - .cra_name = "sha224", - .cra_driver_name= "octeon-sha224", - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_module = THIS_MODULE, - } -} }; - -static int __init octeon_sha256_mod_init(void) -{ if (!octeon_has_crypto()) - return -ENOTSUPP; - return crypto_register_shashes(octeon_sha256_algs, - ARRAY_SIZE(octeon_sha256_algs)); + return sha256_blocks_generic(state, data, nblocks); + + flags = octeon_crypto_enable(&cop2_state); + write_octeon_64bit_hash_dword(state64[0], 0); + write_octeon_64bit_hash_dword(state64[1], 1); + write_octeon_64bit_hash_dword(state64[2], 2); + write_octeon_64bit_hash_dword(state64[3], 3); + + do { + const u64 *block = (const u64 *)data; + + write_octeon_64bit_block_dword(block[0], 0); + write_octeon_64bit_block_dword(block[1], 1); + write_octeon_64bit_block_dword(block[2], 2); + write_octeon_64bit_block_dword(block[3], 3); + write_octeon_64bit_block_dword(block[4], 4); + write_octeon_64bit_block_dword(block[5], 5); + write_octeon_64bit_block_dword(block[6], 6); + octeon_sha256_start(block[7]); + + data += SHA256_BLOCK_SIZE; + } while (--nblocks); + + state64[0] = read_octeon_64bit_hash_dword(0); + state64[1] = read_octeon_64bit_hash_dword(1); + state64[2] = read_octeon_64bit_hash_dword(2); + state64[3] = read_octeon_64bit_hash_dword(3); + octeon_crypto_disable(&cop2_state, flags); } +EXPORT_SYMBOL_GPL(sha256_blocks_arch); -static void __exit octeon_sha256_mod_fini(void) +bool sha256_is_arch_optimized(void) { - crypto_unregister_shashes(octeon_sha256_algs, - ARRAY_SIZE(octeon_sha256_algs)); + return octeon_has_crypto(); } - -module_init(octeon_sha256_mod_init); -module_exit(octeon_sha256_mod_fini); +EXPORT_SYMBOL_GPL(sha256_is_arch_optimized); MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm (OCTEON)"); +MODULE_DESCRIPTION("SHA-256 Secure Hash Algorithm (OCTEON)"); MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>"); diff --git a/arch/mips/cavium-octeon/crypto/octeon-sha512.c b/arch/mips/cavium-octeon/crypto/octeon-sha512.c index 2dee9354e33f..215311053db3 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-sha512.c +++ b/arch/mips/cavium-octeon/crypto/octeon-sha512.c @@ -13,15 +13,12 @@ * 
Copyright (c) 2003 Kyle McMartin <kyle@debian.org> */ -#include <linux/mm.h> +#include <asm/octeon/octeon.h> +#include <crypto/internal/hash.h> #include <crypto/sha2.h> #include <crypto/sha512_base.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/kernel.h> #include <linux/module.h> -#include <asm/byteorder.h> -#include <asm/octeon/octeon.h> -#include <crypto/internal/hash.h> #include "octeon-crypto.h" @@ -53,60 +50,31 @@ static void octeon_sha512_read_hash(struct sha512_state *sctx) sctx->state[7] = read_octeon_64bit_hash_sha512(7); } -static void octeon_sha512_transform(const void *_block) +static void octeon_sha512_transform(struct sha512_state *sctx, + const u8 *src, int blocks) { - const u64 *block = _block; - - write_octeon_64bit_block_sha512(block[0], 0); - write_octeon_64bit_block_sha512(block[1], 1); - write_octeon_64bit_block_sha512(block[2], 2); - write_octeon_64bit_block_sha512(block[3], 3); - write_octeon_64bit_block_sha512(block[4], 4); - write_octeon_64bit_block_sha512(block[5], 5); - write_octeon_64bit_block_sha512(block[6], 6); - write_octeon_64bit_block_sha512(block[7], 7); - write_octeon_64bit_block_sha512(block[8], 8); - write_octeon_64bit_block_sha512(block[9], 9); - write_octeon_64bit_block_sha512(block[10], 10); - write_octeon_64bit_block_sha512(block[11], 11); - write_octeon_64bit_block_sha512(block[12], 12); - write_octeon_64bit_block_sha512(block[13], 13); - write_octeon_64bit_block_sha512(block[14], 14); - octeon_sha512_start(block[15]); -} - -static void __octeon_sha512_update(struct sha512_state *sctx, const u8 *data, - unsigned int len) -{ - unsigned int part_len; - unsigned int index; - unsigned int i; - - /* Compute number of bytes mod 128. */ - index = sctx->count[0] % SHA512_BLOCK_SIZE; - - /* Update number of bytes. */ - if ((sctx->count[0] += len) < len) - sctx->count[1]++; - - part_len = SHA512_BLOCK_SIZE - index; - - /* Transform as many times as possible. */ - if (len >= part_len) { - memcpy(&sctx->buf[index], data, part_len); - octeon_sha512_transform(sctx->buf); - - for (i = part_len; i + SHA512_BLOCK_SIZE <= len; - i += SHA512_BLOCK_SIZE) - octeon_sha512_transform(&data[i]); - - index = 0; - } else { - i = 0; - } - - /* Buffer remaining input. 
*/ - memcpy(&sctx->buf[index], &data[i], len - i); + do { + const u64 *block = (const u64 *)src; + + write_octeon_64bit_block_sha512(block[0], 0); + write_octeon_64bit_block_sha512(block[1], 1); + write_octeon_64bit_block_sha512(block[2], 2); + write_octeon_64bit_block_sha512(block[3], 3); + write_octeon_64bit_block_sha512(block[4], 4); + write_octeon_64bit_block_sha512(block[5], 5); + write_octeon_64bit_block_sha512(block[6], 6); + write_octeon_64bit_block_sha512(block[7], 7); + write_octeon_64bit_block_sha512(block[8], 8); + write_octeon_64bit_block_sha512(block[9], 9); + write_octeon_64bit_block_sha512(block[10], 10); + write_octeon_64bit_block_sha512(block[11], 11); + write_octeon_64bit_block_sha512(block[12], 12); + write_octeon_64bit_block_sha512(block[13], 13); + write_octeon_64bit_block_sha512(block[14], 14); + octeon_sha512_start(block[15]); + + src += SHA512_BLOCK_SIZE; + } while (--blocks); } static int octeon_sha512_update(struct shash_desc *desc, const u8 *data, @@ -115,89 +83,48 @@ static int octeon_sha512_update(struct shash_desc *desc, const u8 *data, struct sha512_state *sctx = shash_desc_ctx(desc); struct octeon_cop2_state state; unsigned long flags; - - /* - * Small updates never reach the crypto engine, so the generic sha512 is - * faster because of the heavyweight octeon_crypto_enable() / - * octeon_crypto_disable(). - */ - if ((sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE) - return crypto_sha512_update(desc, data, len); + int remain; flags = octeon_crypto_enable(&state); octeon_sha512_store_hash(sctx); - __octeon_sha512_update(sctx, data, len); + remain = sha512_base_do_update_blocks(desc, data, len, + octeon_sha512_transform); octeon_sha512_read_hash(sctx); octeon_crypto_disable(&state, flags); - - return 0; + return remain; } -static int octeon_sha512_final(struct shash_desc *desc, u8 *hash) +static int octeon_sha512_finup(struct shash_desc *desc, const u8 *src, + unsigned int len, u8 *hash) { struct sha512_state *sctx = shash_desc_ctx(desc); - static u8 padding[128] = { 0x80, }; struct octeon_cop2_state state; - __be64 *dst = (__be64 *)hash; - unsigned int pad_len; unsigned long flags; - unsigned int index; - __be64 bits[2]; - int i; - - /* Save number of bits. */ - bits[1] = cpu_to_be64(sctx->count[0] << 3); - bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61); - - /* Pad out to 112 mod 128. */ - index = sctx->count[0] & 0x7f; - pad_len = (index < 112) ? (112 - index) : ((128+112) - index); flags = octeon_crypto_enable(&state); octeon_sha512_store_hash(sctx); - __octeon_sha512_update(sctx, padding, pad_len); - - /* Append length (before padding). */ - __octeon_sha512_update(sctx, (const u8 *)bits, sizeof(bits)); + sha512_base_do_finup(desc, src, len, octeon_sha512_transform); octeon_sha512_read_hash(sctx); octeon_crypto_disable(&state, flags); - - /* Store state in digest. */ - for (i = 0; i < 8; i++) - dst[i] = cpu_to_be64(sctx->state[i]); - - /* Zeroize sensitive information. 
*/ - memset(sctx, 0, sizeof(struct sha512_state)); - - return 0; -} - -static int octeon_sha384_final(struct shash_desc *desc, u8 *hash) -{ - u8 D[64]; - - octeon_sha512_final(desc, D); - - memcpy(hash, D, 48); - memzero_explicit(D, 64); - - return 0; + return sha512_base_finish(desc, hash); } static struct shash_alg octeon_sha512_algs[2] = { { .digestsize = SHA512_DIGEST_SIZE, .init = sha512_base_init, .update = octeon_sha512_update, - .final = octeon_sha512_final, - .descsize = sizeof(struct sha512_state), + .finup = octeon_sha512_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha512", .cra_driver_name= "octeon-sha512", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_module = THIS_MODULE, } @@ -205,12 +132,14 @@ static struct shash_alg octeon_sha512_algs[2] = { { .digestsize = SHA384_DIGEST_SIZE, .init = sha384_base_init, .update = octeon_sha512_update, - .final = octeon_sha384_final, - .descsize = sizeof(struct sha512_state), + .finup = octeon_sha512_finup, + .descsize = SHA512_STATE_SIZE, .base = { .cra_name = "sha384", .cra_driver_name= "octeon-sha384", .cra_priority = OCTEON_CR_OPCODE_PRIORITY, + .cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY | + CRYPTO_AHASH_ALG_FINUP_MAX, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_module = THIS_MODULE, } diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index e6b4d9c0c169..5c3de175ef5b 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -1503,8 +1503,8 @@ static int __init octeon_irq_init_ciu( /* Mips internal */ octeon_irq_init_core(); - ciu_domain = irq_domain_add_tree( - ciu_node, &octeon_irq_domain_ciu_ops, dd); + ciu_domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_irq_domain_ciu_ops, + dd); irq_set_default_domain(ciu_domain); /* CIU_0 */ @@ -1637,8 +1637,8 @@ static int __init octeon_irq_init_gpio( if (gpiod) { /* gpio domain host_data is the base hwirq number. */ gpiod->base_hwirq = base_hwirq; - irq_domain_add_linear( - gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); + irq_domain_create_linear(of_fwnode_handle(gpio_node), 16, + &octeon_irq_domain_gpio_ops, gpiod); } else { pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); return -ENOMEM; @@ -2074,8 +2074,8 @@ static int __init octeon_irq_init_ciu2( /* Mips internal */ octeon_irq_init_core(); - ciu_domain = irq_domain_add_tree( - ciu_node, &octeon_irq_domain_ciu2_ops, NULL); + ciu_domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_irq_domain_ciu2_ops, + NULL); irq_set_default_domain(ciu_domain); /* CUI2 */ @@ -2331,11 +2331,12 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node, } host_data->max_bits = val; - cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, - &octeon_irq_domain_cib_ops, - host_data); + cib_domain = irq_domain_create_linear(of_fwnode_handle(ciu_node), + host_data->max_bits, + &octeon_irq_domain_cib_ops, + host_data); if (!cib_domain) { - pr_err("ERROR: Couldn't irq_domain_add_linear()\n"); + pr_err("ERROR: Couldn't irq_domain_create_linear()\n"); return -ENOMEM; } @@ -2918,8 +2919,8 @@ static int __init octeon_irq_init_ciu3(struct device_node *ciu_node, * Initialize all domains to use the default domain. Specific major * blocks will overwrite the default domain as needed. 
*/ - domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops, - ciu3_info); + domain = irq_domain_create_tree(of_fwnode_handle(ciu_node), &octeon_dflt_domain_ciu3_ops, + ciu3_info); for (i = 0; i < MAX_CIU3_DOMAINS; i++) ciu3_info->domain[i] = domain; diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig index 8f7c36868204..97d2cd997285 100644 --- a/arch/mips/configs/bigsur_defconfig +++ b/arch/mips/configs/bigsur_defconfig @@ -81,7 +81,6 @@ CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m CONFIG_IP_VS_FTP=m -CONFIG_IP_DCCP=m CONFIG_BRIDGE=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index f523ee6f25bf..88ae0aa85364 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig @@ -157,7 +157,6 @@ CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5_OCTEON=y CONFIG_CRYPTO_SHA1_OCTEON=m -CONFIG_CRYPTO_SHA256_OCTEON=m CONFIG_CRYPTO_SHA512_OCTEON=m CONFIG_CRYPTO_DES=y CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig index 9655567614aa..85a4472cb058 100644 --- a/arch/mips/configs/decstation_64_defconfig +++ b/arch/mips/configs/decstation_64_defconfig @@ -168,7 +168,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_CRYPTO_RSA=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig index 1539fe8eb34d..a3b2c8da2dde 100644 --- a/arch/mips/configs/decstation_defconfig +++ b/arch/mips/configs/decstation_defconfig @@ -163,7 +163,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_CRYPTO_RSA=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig index 58c36720c94a..a476717b8a6a 100644 --- a/arch/mips/configs/decstation_r4k_defconfig +++ b/arch/mips/configs/decstation_r4k_defconfig @@ -163,7 +163,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_UTF8=m CONFIG_CRYPTO_RSA=m -CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_CCM=m CONFIG_CRYPTO_GCM=m CONFIG_CRYPTO_CHACHA20POLY1305=m diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig index bc1ef66e3999..8b7ad877e07a 100644 --- a/arch/mips/configs/gcw0_defconfig +++ b/arch/mips/configs/gcw0_defconfig @@ -13,7 +13,6 @@ CONFIG_MIPS_CMDLINE_DTB_EXTEND=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set -# CONFIG_BOUNCE is not set CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig index 12f3eed8a946..437ef6dc0b4c 100644 --- a/arch/mips/configs/gpr_defconfig +++ b/arch/mips/configs/gpr_defconfig @@ -84,7 +84,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_IP_DCCP=m CONFIG_IP_SCTP=m CONFIG_TIPC=m CONFIG_ATM=y @@ -273,7 +272,7 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_850=y CONFIG_NLS_ISO8859_1=y CONFIG_CRYPTO_AUTHENC=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig index e0040110a3ee..6db21e498faa 
100644 --- a/arch/mips/configs/ip28_defconfig +++ b/arch/mips/configs/ip28_defconfig @@ -60,6 +60,5 @@ CONFIG_TMPFS_POSIX_ACL=y CONFIG_NFS_FS=y CONFIG_NFS_V3_ACL=y CONFIG_ROOT_NFS=y -CONFIG_CRYPTO_MANAGER=y # CONFIG_CRYPTO_HW is not set CONFIG_MAGIC_SYSRQ=y diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig index 71d6340497c9..5038a27d035f 100644 --- a/arch/mips/configs/lemote2f_defconfig +++ b/arch/mips/configs/lemote2f_defconfig @@ -297,7 +297,7 @@ CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=y CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig index 06b7a0b97eca..e4bcdb64df6c 100644 --- a/arch/mips/configs/mtx1_defconfig +++ b/arch/mips/configs/mtx1_defconfig @@ -130,7 +130,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m CONFIG_BRIDGE_EBT_REDIRECT=m CONFIG_BRIDGE_EBT_SNAT=m CONFIG_BRIDGE_EBT_LOG=m -CONFIG_IP_DCCP=m CONFIG_IP_SCTP=m CONFIG_TIPC=m CONFIG_ATM=y @@ -662,7 +661,7 @@ CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m CONFIG_NLS_UTF8=m -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_MD5=y diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig index 42b161d587c7..9fb114ef5e2d 100644 --- a/arch/mips/configs/rb532_defconfig +++ b/arch/mips/configs/rb532_defconfig @@ -153,6 +153,6 @@ CONFIG_JFFS2_FS=y CONFIG_JFFS2_SUMMARY=y CONFIG_JFFS2_COMPRESSION_OPTIONS=y CONFIG_SQUASHFS=y -CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_BENCHMARK=m # CONFIG_CRYPTO_HW is not set CONFIG_STRIP_ASM_SYMS=y diff --git a/arch/mips/crypto/Kconfig b/arch/mips/crypto/Kconfig index 545fc0e12422..6bf073ae7613 100644 --- a/arch/mips/crypto/Kconfig +++ b/arch/mips/crypto/Kconfig @@ -2,17 +2,6 @@ menu "Accelerated Cryptographic Algorithms for CPU (mips)" -config CRYPTO_POLY1305_MIPS - tristate - depends on MIPS - select CRYPTO_HASH - select CRYPTO_ARCH_HAVE_LIB_POLY1305 - default CRYPTO_LIB_POLY1305_INTERNAL - help - Poly1305 authenticator algorithm (RFC7539) - - Architecture: mips - config CRYPTO_MD5_OCTEON tristate "Digests: MD5 (OCTEON)" depends on CPU_CAVIUM_OCTEON @@ -33,16 +22,6 @@ config CRYPTO_SHA1_OCTEON Architecture: mips OCTEON -config CRYPTO_SHA256_OCTEON - tristate "Hash functions: SHA-224 and SHA-256 (OCTEON)" - depends on CPU_CAVIUM_OCTEON - select CRYPTO_SHA256 - select CRYPTO_HASH - help - SHA-224 and SHA-256 secure hash algorithms (FIPS 180) - - Architecture: mips OCTEON using crypto instructions, when available - config CRYPTO_SHA512_OCTEON tristate "Hash functions: SHA-384 and SHA-512 (OCTEON)" depends on CPU_CAVIUM_OCTEON @@ -53,16 +32,4 @@ config CRYPTO_SHA512_OCTEON Architecture: mips OCTEON using crypto instructions, when available -config CRYPTO_CHACHA_MIPS - tristate - depends on CPU_MIPS32_R2 - select CRYPTO_SKCIPHER - select CRYPTO_ARCH_HAVE_LIB_CHACHA - default CRYPTO_LIB_CHACHA_INTERNAL - help - Length-preserving ciphers: ChaCha20, XChaCha20, and XChaCha12 - stream cipher algorithms - - Architecture: MIPS32r2 - endmenu diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile index fddc88281412..5adb631a69c1 100644 --- a/arch/mips/crypto/Makefile +++ b/arch/mips/crypto/Makefile @@ -3,20 +3,3 @@ # Makefile for MIPS crypto files.. 
# -obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o -chacha-mips-y := chacha-core.o chacha-glue.o -AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots - -obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o -poly1305-mips-y := poly1305-core.o poly1305-glue.o - -perlasm-flavour-$(CONFIG_32BIT) := o32 -perlasm-flavour-$(CONFIG_64BIT) := 64 - -quiet_cmd_perlasm = PERLASM $@ - cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@) - -$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE - $(call if_changed,perlasm) - -targets += poly1305-core.S diff --git a/arch/mips/crypto/chacha-glue.c b/arch/mips/crypto/chacha-glue.c deleted file mode 100644 index f6fc2e1079a1..000000000000 --- a/arch/mips/crypto/chacha-glue.c +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * MIPS accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> - */ - -#include <asm/byteorder.h> -#include <crypto/algapi.h> -#include <crypto/internal/chacha.h> -#include <crypto/internal/skcipher.h> -#include <linux/kernel.h> -#include <linux/module.h> - -asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds); -EXPORT_SYMBOL(chacha_crypt_arch); - -asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds); -EXPORT_SYMBOL(hchacha_block_arch); - -static int chacha_mips_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) -{ - struct skcipher_walk walk; - u32 state[16]; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - chacha_init(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; - - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - - chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr, - nbytes, ctx->nrounds); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -} - -static int chacha_mips(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_mips_stream_xor(req, ctx, req->iv); -} - -static int xchacha_mips(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - struct chacha_ctx subctx; - u32 state[16]; - u8 real_iv[16]; - - chacha_init(state, ctx->key, req->iv); - - hchacha_block(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); - return chacha_mips_stream_xor(req, &subctx, real_iv); -} - -static struct skcipher_alg algs[] = { - { - .base.cra_name = "chacha20", - .base.cra_driver_name = "chacha20-mips", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha20_setkey, - .encrypt = chacha_mips, - .decrypt = chacha_mips, - }, { - .base.cra_name = "xchacha20", - .base.cra_driver_name = "xchacha20-mips", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - 
.setkey = chacha20_setkey, - .encrypt = xchacha_mips, - .decrypt = xchacha_mips, - }, { - .base.cra_name = "xchacha12", - .base.cra_driver_name = "xchacha12-mips", - .base.cra_priority = 200, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct chacha_ctx), - .base.cra_module = THIS_MODULE, - - .min_keysize = CHACHA_KEY_SIZE, - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .setkey = chacha12_setkey, - .encrypt = xchacha_mips, - .decrypt = xchacha_mips, - } -}; - -static int __init chacha_simd_mod_init(void) -{ - return IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER) ? - crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; -} - -static void __exit chacha_simd_mod_fini(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_SKCIPHER)) - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -} - -module_init(chacha_simd_mod_init); -module_exit(chacha_simd_mod_fini); - -MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)"); -MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("chacha20"); -MODULE_ALIAS_CRYPTO("chacha20-mips"); -MODULE_ALIAS_CRYPTO("xchacha20"); -MODULE_ALIAS_CRYPTO("xchacha20-mips"); -MODULE_ALIAS_CRYPTO("xchacha12"); -MODULE_ALIAS_CRYPTO("xchacha12-mips"); diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c deleted file mode 100644 index c03ad0bbe69c..000000000000 --- a/arch/mips/crypto/poly1305-glue.c +++ /dev/null @@ -1,192 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS - * - * Copyright (C) 2019 Linaro Ltd. <ard.biesheuvel@linaro.org> - */ - -#include <linux/unaligned.h> -#include <crypto/algapi.h> -#include <crypto/internal/hash.h> -#include <crypto/internal/poly1305.h> -#include <linux/cpufeature.h> -#include <linux/crypto.h> -#include <linux/module.h> - -asmlinkage void poly1305_init_mips(void *state, const u8 *key); -asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); -asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce); - -void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) -{ - poly1305_init_mips(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(key + 16); - dctx->s[1] = get_unaligned_le32(key + 20); - dctx->s[2] = get_unaligned_le32(key + 24); - dctx->s[3] = get_unaligned_le32(key + 28); - dctx->buflen = 0; -} -EXPORT_SYMBOL(poly1305_init_arch); - -static int mips_poly1305_init(struct shash_desc *desc) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - dctx->buflen = 0; - dctx->rset = 0; - dctx->sset = false; - - return 0; -} - -static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - u32 len, u32 hibit) -{ - if (unlikely(!dctx->sset)) { - if (!dctx->rset) { - poly1305_init_mips(&dctx->h, src); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->rset = 1; - } - if (len >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); - dctx->s[1] = get_unaligned_le32(src + 4); - dctx->s[2] = get_unaligned_le32(src + 8); - dctx->s[3] = get_unaligned_le32(src + 12); - src += POLY1305_BLOCK_SIZE; - len -= POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - if (len < POLY1305_BLOCK_SIZE) - return; - } - - len &= ~(POLY1305_BLOCK_SIZE - 1); - - poly1305_blocks_mips(&dctx->h, src, len, hibit); -} - -static int mips_poly1305_update(struct shash_desc *desc, const u8 *src, - unsigned int len) -{ - struct poly1305_desc_ctx 
*dctx = shash_desc_ctx(desc); - - if (unlikely(dctx->buflen)) { - u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - len -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(len >= POLY1305_BLOCK_SIZE)) { - mips_poly1305_blocks(dctx, src, len, 1); - src += round_down(len, POLY1305_BLOCK_SIZE); - len %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(len)) { - dctx->buflen = len; - memcpy(dctx->buf, src, len); - } - return 0; -} - -void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int nbytes) -{ - if (unlikely(dctx->buflen)) { - u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen); - - memcpy(dctx->buf + dctx->buflen, src, bytes); - src += bytes; - nbytes -= bytes; - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks_mips(&dctx->h, dctx->buf, - POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { - unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); - - poly1305_blocks_mips(&dctx->h, src, len, 1); - src += len; - nbytes %= POLY1305_BLOCK_SIZE; - } - - if (unlikely(nbytes)) { - dctx->buflen = nbytes; - memcpy(dctx->buf, src, nbytes); - } -} -EXPORT_SYMBOL(poly1305_update_arch); - -void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -{ - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); - poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - - poly1305_emit_mips(&dctx->h, dst, dctx->s); - *dctx = (struct poly1305_desc_ctx){}; -} -EXPORT_SYMBOL(poly1305_final_arch); - -static int mips_poly1305_final(struct shash_desc *desc, u8 *dst) -{ - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - - if (unlikely(!dctx->sset)) - return -ENOKEY; - - poly1305_final_arch(dctx, dst); - return 0; -} - -static struct shash_alg mips_poly1305_alg = { - .init = mips_poly1305_init, - .update = mips_poly1305_update, - .final = mips_poly1305_final, - .digestsize = POLY1305_DIGEST_SIZE, - .descsize = sizeof(struct poly1305_desc_ctx), - - .base.cra_name = "poly1305", - .base.cra_driver_name = "poly1305-mips", - .base.cra_priority = 200, - .base.cra_blocksize = POLY1305_BLOCK_SIZE, - .base.cra_module = THIS_MODULE, -}; - -static int __init mips_poly1305_mod_init(void) -{ - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? - crypto_register_shash(&mips_poly1305_alg) : 0; -} - -static void __exit mips_poly1305_mod_exit(void) -{ - if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) - crypto_unregister_shash(&mips_poly1305_alg); -} - -module_init(mips_poly1305_mod_init); -module_exit(mips_poly1305_mod_exit); - -MODULE_DESCRIPTION("Poly1305 transform (MIPS accelerated"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS_CRYPTO("poly1305"); -MODULE_ALIAS_CRYPTO("poly1305-mips"); diff --git a/arch/mips/include/asm/socket.h b/arch/mips/include/asm/socket.h index 4724a563c5bf..43a09f0dd3ff 100644 --- a/arch/mips/include/asm/socket.h +++ b/arch/mips/include/asm/socket.h @@ -36,15 +36,6 @@ enum sock_type { SOCK_PACKET = 10, }; -#define SOCK_MAX (SOCK_PACKET + 1) -/* Mask which covers at least up to SOCK_MASK-1. The - * * remaining bits are used as flags. 
*/ -#define SOCK_TYPE_MASK 0xf - -/* Flags for socket, socketpair, paccept */ -#define SOCK_CLOEXEC O_CLOEXEC -#define SOCK_NONBLOCK O_NONBLOCK - #define ARCH_HAS_SOCKET_TYPES 1 #endif /* _ASM_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 22fa8f19924a..31ac655b7837 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -161,6 +161,8 @@ #define SO_RCVPRIORITY 82 +#define SO_PASSRIGHTS 83 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index c4d6b09136b1..196a070349b0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -791,8 +791,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc, if (!mipspmu_event_set_period(event, hwc, idx)) return; - if (perf_event_overflow(event, data, regs)) - mipsxx_pmu_disable_event(idx); + perf_event_overflow(event, data, regs); } diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index 8f208007b8e8..a112573b6e37 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c @@ -377,7 +377,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) for (i = 0; i < MAX_IM; i++) irq_set_chained_handler(i + 2, ltq_hw_irq_handler); - ltq_domain = irq_domain_add_linear(node, + ltq_domain = irq_domain_create_linear(of_fwnode_handle(node), (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE, &irq_domain_ops, 0); diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index 9c024e6d5e54..9d75845ef78e 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -3,6 +3,8 @@ # Makefile for MIPS-specific library files.. # +obj-y += crypto/ + lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ mips-atomic.o strncpy_user.o \ strnlen_user.o uncached.o diff --git a/arch/mips/lib/crc32-mips.c b/arch/mips/lib/crc32-mips.c index 676a4b3e290b..45e4d2c9fbf5 100644 --- a/arch/mips/lib/crc32-mips.c +++ b/arch/mips/lib/crc32-mips.c @@ -62,7 +62,7 @@ do { \ #define CRC32C(crc, value, size) \ _CRC32(crc, value, size, crc32c) -static DEFINE_STATIC_KEY_FALSE(have_crc32); +static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_crc32); u32 crc32_le_arch(u32 crc, const u8 *p, size_t len) { @@ -163,7 +163,7 @@ static int __init crc32_mips_init(void) static_branch_enable(&have_crc32); return 0; } -arch_initcall(crc32_mips_init); +subsys_initcall(crc32_mips_init); static void __exit crc32_mips_exit(void) { diff --git a/arch/mips/lib/crypto/.gitignore b/arch/mips/lib/crypto/.gitignore new file mode 100644 index 000000000000..0d47d4f21c6d --- /dev/null +++ b/arch/mips/lib/crypto/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +poly1305-core.S diff --git a/arch/mips/lib/crypto/Kconfig b/arch/mips/lib/crypto/Kconfig new file mode 100644 index 000000000000..0670a170c1be --- /dev/null +++ b/arch/mips/lib/crypto/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config CRYPTO_CHACHA_MIPS + tristate + depends on CPU_MIPS32_R2 + default CRYPTO_LIB_CHACHA + select CRYPTO_ARCH_HAVE_LIB_CHACHA + +config CRYPTO_POLY1305_MIPS + tristate + default CRYPTO_LIB_POLY1305 + select CRYPTO_ARCH_HAVE_LIB_POLY1305 diff --git a/arch/mips/lib/crypto/Makefile b/arch/mips/lib/crypto/Makefile new file mode 100644 index 000000000000..804488c7aded --- /dev/null +++ b/arch/mips/lib/crypto/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += 
chacha-mips.o +chacha-mips-y := chacha-core.o chacha-glue.o +AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots + +obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o +poly1305-mips-y := poly1305-core.o poly1305-glue.o + +perlasm-flavour-$(CONFIG_32BIT) := o32 +perlasm-flavour-$(CONFIG_64BIT) := 64 + +quiet_cmd_perlasm = PERLASM $@ + cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@) + +$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE + $(call if_changed,perlasm) + +targets += poly1305-core.S diff --git a/arch/mips/crypto/chacha-core.S b/arch/mips/lib/crypto/chacha-core.S index 5755f69cfe00..5755f69cfe00 100644 --- a/arch/mips/crypto/chacha-core.S +++ b/arch/mips/lib/crypto/chacha-core.S diff --git a/arch/mips/lib/crypto/chacha-glue.c b/arch/mips/lib/crypto/chacha-glue.c new file mode 100644 index 000000000000..88c097594eb0 --- /dev/null +++ b/arch/mips/lib/crypto/chacha-glue.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ChaCha and HChaCha functions (MIPS optimized) + * + * Copyright (C) 2019 Linaro, Ltd. <ard.biesheuvel@linaro.org> + */ + +#include <crypto/chacha.h> +#include <linux/kernel.h> +#include <linux/module.h> + +asmlinkage void chacha_crypt_arch(struct chacha_state *state, + u8 *dst, const u8 *src, + unsigned int bytes, int nrounds); +EXPORT_SYMBOL(chacha_crypt_arch); + +asmlinkage void hchacha_block_arch(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds); +EXPORT_SYMBOL(hchacha_block_arch); + +bool chacha_is_arch_optimized(void) +{ + return true; +} +EXPORT_SYMBOL(chacha_is_arch_optimized); + +MODULE_DESCRIPTION("ChaCha and HChaCha functions (MIPS optimized)"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/mips/lib/crypto/poly1305-glue.c b/arch/mips/lib/crypto/poly1305-glue.c new file mode 100644 index 000000000000..764a38a65200 --- /dev/null +++ b/arch/mips/lib/crypto/poly1305-glue.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS + * + * Copyright (C) 2019 Linaro Ltd. 
<ard.biesheuvel@linaro.org> + */ + +#include <crypto/internal/poly1305.h> +#include <linux/cpufeature.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/unaligned.h> + +asmlinkage void poly1305_block_init_arch( + struct poly1305_block_state *state, + const u8 raw_key[POLY1305_BLOCK_SIZE]); +EXPORT_SYMBOL_GPL(poly1305_block_init_arch); +asmlinkage void poly1305_blocks_arch(struct poly1305_block_state *state, + const u8 *src, u32 len, u32 hibit); +EXPORT_SYMBOL_GPL(poly1305_blocks_arch); +asmlinkage void poly1305_emit_arch(const struct poly1305_state *state, + u8 digest[POLY1305_DIGEST_SIZE], + const u32 nonce[4]); +EXPORT_SYMBOL_GPL(poly1305_emit_arch); + +bool poly1305_is_arch_optimized(void) +{ + return true; +} +EXPORT_SYMBOL(poly1305_is_arch_optimized); + +MODULE_DESCRIPTION("Poly1305 transform (MIPS accelerated"); +MODULE_LICENSE("GPL v2"); diff --git a/arch/mips/crypto/poly1305-mips.pl b/arch/mips/lib/crypto/poly1305-mips.pl index b05bab884ed2..399f10c3e385 100644 --- a/arch/mips/crypto/poly1305-mips.pl +++ b/arch/mips/lib/crypto/poly1305-mips.pl @@ -93,9 +93,9 @@ $code.=<<___; #endif #ifdef __KERNEL__ -# define poly1305_init poly1305_init_mips -# define poly1305_blocks poly1305_blocks_mips -# define poly1305_emit poly1305_emit_mips +# define poly1305_init poly1305_block_init_arch +# define poly1305_blocks poly1305_blocks_arch +# define poly1305_emit poly1305_emit_arch #endif #if defined(__MIPSEB__) && !defined(MIPSEB) @@ -565,9 +565,9 @@ $code.=<<___; #endif #ifdef __KERNEL__ -# define poly1305_init poly1305_init_mips -# define poly1305_blocks poly1305_blocks_mips -# define poly1305_emit poly1305_emit_mips +# define poly1305_init poly1305_block_init_arch +# define poly1305_blocks poly1305_blocks_arch +# define poly1305_emit poly1305_emit_arch #endif #if defined(__MIPSEB__) && !defined(MIPSEB) diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index a925842ee125..17fa97ec6ffb 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -469,8 +469,8 @@ static int ar2315_pci_probe(struct platform_device *pdev) if (err) return err; - apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT, - &ar2315_pci_irq_domain_ops, apc); + apc->domain = irq_domain_create_linear(NULL, AR2315_PCI_IRQ_COUNT, + &ar2315_pci_irq_domain_ops, apc); if (!apc->domain) { dev_err(dev, "failed to add IRQ domain\n"); return -ENOMEM; diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c index 4ac68a534e4f..14454ece485d 100644 --- a/arch/mips/pci/pci-rt3883.c +++ b/arch/mips/pci/pci-rt3883.c @@ -208,9 +208,10 @@ static int rt3883_pci_irq_init(struct device *dev, rt3883_pci_w32(rpc, 0, RT3883_PCI_REG_PCIENA); rpc->irq_domain = - irq_domain_add_linear(rpc->intc_of_node, RT3883_PCI_IRQ_COUNT, - &rt3883_pci_irq_domain_ops, - rpc); + irq_domain_create_linear(of_fwnode_handle(rpc->intc_of_node), + RT3883_PCI_IRQ_COUNT, + &rt3883_pci_irq_domain_ops, + rpc); if (!rpc->irq_domain) { dev_err(dev, "unable to add IRQ domain\n"); return -ENODEV; diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index 46aef0a1b22a..af5bbbea949b 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -176,7 +176,7 @@ static int __init intc_of_init(struct device_node *node, /* route all INTC interrupts to MIPS HW0 interrupt */ rt_intc_w32(0, INTC_REG_TYPE); - domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT, + domain = irq_domain_create_legacy(of_fwnode_handle(node), RALINK_INTC_IRQ_COUNT, RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL); 
if (!domain) panic("Failed to add irqdomain");
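On the crypto side, the octeon-sha256.c hunks above move SHA-256 out of the shash framework and behind the generic SHA-256 library, exporting sha256_blocks_arch() and sha256_is_arch_optimized(). A minimal sketch, assuming in-kernel code with CRYPTO_LIB_SHA256 available, of how such a library backend is reached by callers; example_digest() is hypothetical:

#include <crypto/sha2.h>
#include <linux/types.h>

/* Hypothetical caller: the generic sha256() library helper handles the
 * buffering and padding and typically invokes sha256_blocks_arch() for
 * full blocks; on OCTEON that function (shown in the hunk above) falls
 * back to sha256_blocks_generic() when the crypto unit is absent. */
static void example_digest(const u8 *data, unsigned int len,
			   u8 digest[SHA256_DIGEST_SIZE])
{
	sha256(data, len, digest);
}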