Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig | 12 |
| -rw-r--r-- | lib/Kconfig.debug | 29 |
| -rw-r--r-- | lib/Makefile | 6 |
| -rw-r--r-- | lib/cpu_rmap.c | 6 |
| -rw-r--r-- | lib/crc-t10dif.c | 83 |
| -rw-r--r-- | lib/crc32.c | 17 |
| -rw-r--r-- | lib/debugobjects.c | 20 |
| -rw-r--r-- | lib/decompress_inflate.c | 2 |
| -rw-r--r-- | lib/div64.c | 40 |
| -rw-r--r-- | lib/dump_stack.c | 4 |
| -rw-r--r-- | lib/dynamic_debug.c | 2 |
| -rw-r--r-- | lib/earlycpio.c | 27 |
| -rw-r--r-- | lib/genalloc.c | 22 |
| -rw-r--r-- | lib/hexdump.c | 2 |
| -rw-r--r-- | lib/kobject.c | 34 |
| -rw-r--r-- | lib/lockref.c | 183 |
| -rw-r--r-- | lib/lz4/lz4_decompress.c | 8 |
| -rw-r--r-- | lib/percpu_ida.c | 335 |
| -rw-r--r-- | lib/radix-tree.c | 41 |
| -rw-r--r-- | lib/raid6/.gitignore | 1 |
| -rw-r--r-- | lib/raid6/Makefile | 46 |
| -rw-r--r-- | lib/raid6/algos.c | 9 |
| -rw-r--r-- | lib/raid6/neon.c | 58 |
| -rw-r--r-- | lib/raid6/neon.uc | 80 |
| -rw-r--r-- | lib/raid6/test/Makefile | 35 |
| -rw-r--r-- | lib/raid6/tilegx.uc | 86 |
| -rw-r--r-- | lib/rbtree.c | 40 |
| -rw-r--r-- | lib/rbtree_test.c | 12 |
| -rw-r--r-- | lib/swiotlb.c | 8 |
| -rw-r--r-- | lib/vsprintf.c | 82 |
30 files changed, 1221 insertions, 109 deletions
| diff --git a/lib/Kconfig b/lib/Kconfig index 71d9f81f6eed..b3c8be0da17f 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -48,6 +48,16 @@ config STMP_DEVICE  config PERCPU_RWSEM  	boolean +config ARCH_USE_CMPXCHG_LOCKREF +	bool + +config CMPXCHG_LOCKREF +	def_bool y if ARCH_USE_CMPXCHG_LOCKREF +	depends on SMP +	depends on !GENERIC_LOCKBREAK +	depends on !DEBUG_SPINLOCK +	depends on !DEBUG_LOCK_ALLOC +  config CRC_CCITT  	tristate "CRC-CCITT functions"  	help @@ -66,6 +76,8 @@ config CRC16  config CRC_T10DIF  	tristate "CRC calculation for the T10 Data Integrity Field" +	select CRYPTO +	select CRYPTO_CRCT10DIF  	help  	  This option is only needed if a module that's not in the  	  kernel tree needs to calculate CRC checks for use with the diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1501aa553221..06344d986eb9 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -597,7 +597,7 @@ endmenu # "Memory Debugging"  config DEBUG_SHIRQ  	bool "Debug shared IRQ handlers" -	depends on DEBUG_KERNEL && GENERIC_HARDIRQS +	depends on DEBUG_KERNEL  	help  	  Enable this to generate a spurious interrupt as soon as a shared  	  interrupt handler is registered, and just before one is deregistered. @@ -908,7 +908,7 @@ config LOCKDEP  	bool  	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT  	select STACKTRACE -	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE +	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC  	select KALLSYMS  	select KALLSYMS_ALL @@ -981,6 +981,25 @@ config DEBUG_KOBJECT  	  If you say Y here, some extra kobject debugging messages will be sent  	  to the syslog.  +config DEBUG_KOBJECT_RELEASE +	bool "kobject release debugging" +	depends on DEBUG_KERNEL +	help +	  kobjects are reference counted objects.  This means that their +	  last reference count put is not predictable, and the kobject can +	  live on past the point at which a driver decides to drop it's +	  initial reference to the kobject gained on allocation.  An +	  example of this would be a struct device which has just been +	  unregistered. + +	  However, some buggy drivers assume that after such an operation, +	  the memory backing the kobject can be immediately freed.  This +	  goes completely against the principles of a refcounted object. + +	  If you say Y here, the kernel will delay the release of kobjects +	  on the last reference count to improve the visibility of this +	  kind of kobject release bug. +  config HAVE_DEBUG_BUGVERBOSE  	bool @@ -1347,7 +1366,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER  	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT  	depends on !X86_64  	select STACKTRACE -	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND +	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC  	help  	  Provide stacktrace filter for fault-injection capabilities @@ -1357,7 +1376,7 @@ config LATENCYTOP  	depends on DEBUG_KERNEL  	depends on STACKTRACE_SUPPORT  	depends on PROC_FS -	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND +	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC  	select KALLSYMS  	select KALLSYMS_ALL  	select STACKTRACE @@ -1442,7 +1461,7 @@ config BACKTRACE_SELF_TEST  config RBTREE_TEST  	tristate "Red-Black tree test" -	depends on m && DEBUG_KERNEL +	depends on DEBUG_KERNEL  	help  	  A benchmark measuring the performance of the rbtree library.  	  
Also includes rbtree invariant checks. diff --git a/lib/Makefile b/lib/Makefile index 7baccfd8a4e9..f3bb2cb98adf 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -13,18 +13,20 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \  	 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \  	 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \  	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ -	 earlycpio.o percpu-refcount.o +	 earlycpio.o percpu-refcount.o percpu_ida.o  obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o  lib-$(CONFIG_MMU) += ioremap.o  lib-$(CONFIG_SMP) += cpumask.o  lib-y	+= kobject.o klist.o +obj-y	+= lockref.o  obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \  	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \  	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ -	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o +	 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ +	 percpu_ida.o  obj-y += string_helpers.o  obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o  obj-y += kstrtox.o diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 5fbed5caba6e..4f134d8907a7 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -8,9 +8,7 @@   */  #include <linux/cpu_rmap.h> -#ifdef CONFIG_GENERIC_HARDIRQS  #include <linux/interrupt.h> -#endif  #include <linux/export.h>  /* @@ -213,8 +211,6 @@ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,  }  EXPORT_SYMBOL(cpu_rmap_update); -#ifdef CONFIG_GENERIC_HARDIRQS -  /* Glue between IRQ affinity notifiers and CPU rmaps */  struct irq_glue { @@ -309,5 +305,3 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)  	return rc;  }  EXPORT_SYMBOL(irq_cpu_rmap_add); - -#endif /* CONFIG_GENERIC_HARDIRQS */ diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index fbbd66ed86cd..dfe6ec17c0a5 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -11,57 +11,54 @@  #include <linux/types.h>  #include <linux/module.h>  #include <linux/crc-t10dif.h> +#include <linux/err.h> +#include <linux/init.h> +#include <crypto/hash.h> +#include <linux/static_key.h> -/* Table generated using the following polynomium: - * x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 - * gt: 0x8bb7 - */ -static const __u16 t10_dif_crc_table[256] = { -	0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B, -	0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6, -	0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6, -	0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B, -	0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1, -	0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C, -	0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C, -	0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781, -	0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8, -	0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255, -	0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925, -	0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698, -	0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472, -	0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF, -	0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF, -	0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02, -	0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA, -	0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067, -	0x6E7C, 
0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17, -	0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA, -	0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640, -	0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD, -	0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D, -	0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30, -	0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759, -	0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4, -	0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394, -	0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29, -	0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3, -	0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E, -	0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E, -	0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3 -}; +static struct crypto_shash *crct10dif_tfm; +static struct static_key crct10dif_fallback __read_mostly;  __u16 crc_t10dif(const unsigned char *buffer, size_t len)  { -	__u16 crc = 0; -	unsigned int i; +	struct { +		struct shash_desc shash; +		char ctx[2]; +	} desc; +	int err; + +	if (static_key_false(&crct10dif_fallback)) +		return crc_t10dif_generic(0, buffer, len); + +	desc.shash.tfm = crct10dif_tfm; +	desc.shash.flags = 0; +	*(__u16 *)desc.ctx = 0; -	for (i = 0 ; i < len ; i++) -		crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff]; +	err = crypto_shash_update(&desc.shash, buffer, len); +	BUG_ON(err); -	return crc; +	return *(__u16 *)desc.ctx;  }  EXPORT_SYMBOL(crc_t10dif); +static int __init crc_t10dif_mod_init(void) +{ +	crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); +	if (IS_ERR(crct10dif_tfm)) { +		static_key_slow_inc(&crct10dif_fallback); +		crct10dif_tfm = NULL; +	} +	return 0; +} + +static void __exit crc_t10dif_mod_fini(void) +{ +	crypto_free_shash(crct10dif_tfm); +} + +module_init(crc_t10dif_mod_init); +module_exit(crc_t10dif_mod_fini); +  MODULE_DESCRIPTION("T10 DIF CRC calculation");  MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: crct10dif"); diff --git a/lib/crc32.c b/lib/crc32.c index 072fbd8234d5..410093dbe51c 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -131,11 +131,14 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])  #endif  /** - * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 - * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for - *	other uses, or the previous crc32 value if computing incrementally. - * @p: pointer to buffer over which CRC is run + * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II + *			CRC32/CRC32C + * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for other + *	 uses, or the previous crc32/crc32c value if computing incrementally. + * @p: pointer to buffer over which CRC32/CRC32C is run   * @len: length of buffer @p + * @tab: little-endian Ethernet table + * @polynomial: CRC32/CRC32c LE polynomial   */  static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,  					  size_t len, const u32 (*tab)[256], @@ -201,11 +204,13 @@ EXPORT_SYMBOL(crc32_le);  EXPORT_SYMBOL(__crc32c_le);  /** - * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 + * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32   * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for   *	other uses, or the previous crc32 value if computing incrementally. 
- * @p: pointer to buffer over which CRC is run + * @p: pointer to buffer over which CRC32 is run   * @len: length of buffer @p + * @tab: big-endian Ethernet table + * @polynomial: CRC32 BE polynomial   */  static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,  					  size_t len, const u32 (*tab)[256], diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 37061ede8b81..bf2c8b1043d8 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -381,19 +381,21 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)   * debug_object_activate - debug checks when an object is activated   * @addr:	address of the object   * @descr:	pointer to an object specific debug description structure + * Returns 0 for success, -EINVAL for check failed.   */ -void debug_object_activate(void *addr, struct debug_obj_descr *descr) +int debug_object_activate(void *addr, struct debug_obj_descr *descr)  {  	enum debug_obj_state state;  	struct debug_bucket *db;  	struct debug_obj *obj;  	unsigned long flags; +	int ret;  	struct debug_obj o = { .object = addr,  			       .state = ODEBUG_STATE_NOTAVAILABLE,  			       .descr = descr };  	if (!debug_objects_enabled) -		return; +		return 0;  	db = get_bucket((unsigned long) addr); @@ -405,23 +407,26 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)  		case ODEBUG_STATE_INIT:  		case ODEBUG_STATE_INACTIVE:  			obj->state = ODEBUG_STATE_ACTIVE; +			ret = 0;  			break;  		case ODEBUG_STATE_ACTIVE:  			debug_print_object(obj, "activate");  			state = obj->state;  			raw_spin_unlock_irqrestore(&db->lock, flags); -			debug_object_fixup(descr->fixup_activate, addr, state); -			return; +			ret = debug_object_fixup(descr->fixup_activate, addr, state); +			return ret ? -EINVAL : 0;  		case ODEBUG_STATE_DESTROYED:  			debug_print_object(obj, "activate"); +			ret = -EINVAL;  			break;  		default: +			ret = 0;  			break;  		}  		raw_spin_unlock_irqrestore(&db->lock, flags); -		return; +		return ret;  	}  	raw_spin_unlock_irqrestore(&db->lock, flags); @@ -431,8 +436,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)  	 * true or not.  	 */  	if (debug_object_fixup(descr->fixup_activate, addr, -			   ODEBUG_STATE_NOTAVAILABLE)) +			   ODEBUG_STATE_NOTAVAILABLE)) {  		debug_print_object(&o, "activate"); +		return -EINVAL; +	} +	return 0;  }  /** diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index 19ff89e34eec..d619b28c456f 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c @@ -48,7 +48,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,  		out_len = 0x8000; /* 32 K */  		out_buf = malloc(out_len);  	} else { -		out_len = 0x7fffffff; /* no limit */ +		out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */  	}  	if (!out_buf) {  		error("Out of memory while allocating output buffer"); diff --git a/lib/div64.c b/lib/div64.c index a163b6caef73..4382ad77777e 100644 --- a/lib/div64.c +++ b/lib/div64.c @@ -79,6 +79,46 @@ EXPORT_SYMBOL(div_s64_rem);  #endif  /** + * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder + * @dividend:	64bit dividend + * @divisor:	64bit divisor + * @remainder:  64bit remainder + * + * This implementation is a comparable to algorithm used by div64_u64. + * But this operation, which includes math for calculating the remainder, + * is kept distinct to avoid slowing down the div64_u64 operation on 32bit + * systems. 
+ */ +#ifndef div64_u64_rem +u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) +{ +	u32 high = divisor >> 32; +	u64 quot; + +	if (high == 0) { +		u32 rem32; +		quot = div_u64_rem(dividend, divisor, &rem32); +		*remainder = rem32; +	} else { +		int n = 1 + fls(high); +		quot = div_u64(dividend >> n, divisor >> n); + +		if (quot != 0) +			quot--; + +		*remainder = dividend - quot * divisor; +		if (*remainder >= divisor) { +			quot++; +			*remainder -= divisor; +		} +	} + +	return quot; +} +EXPORT_SYMBOL(div64_u64_rem); +#endif + +/**   * div64_u64 - unsigned 64bit divide with 64bit divisor   * @dividend:	64bit dividend   * @divisor:	64bit divisor diff --git a/lib/dump_stack.c b/lib/dump_stack.c index c03154173cc7..f23b63f0a1c3 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -23,7 +23,7 @@ static void __dump_stack(void)  #ifdef CONFIG_SMP  static atomic_t dump_lock = ATOMIC_INIT(-1); -void dump_stack(void) +asmlinkage void dump_stack(void)  {  	int was_locked;  	int old; @@ -55,7 +55,7 @@ retry:  	preempt_enable();  }  #else -void dump_stack(void) +asmlinkage void dump_stack(void)  {  	__dump_stack();  } diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 99fec3ae405a..c37aeacd7651 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -309,7 +309,7 @@ static int ddebug_parse_query(char *words[], int nwords,  			struct ddebug_query *query, const char *modname)  {  	unsigned int i; -	int rc; +	int rc = 0;  	/* check we have an even number of words */  	if (nwords % 2 != 0) { diff --git a/lib/earlycpio.c b/lib/earlycpio.c index 7aa7ce250c94..3eb3e4722b8e 100644 --- a/lib/earlycpio.c +++ b/lib/earlycpio.c @@ -49,22 +49,23 @@ enum cpio_fields {  /**   * cpio_data find_cpio_data - Search for files in an uncompressed cpio - * @path:   The directory to search for, including a slash at the end - * @data:   Pointer to the the cpio archive or a header inside - * @len:    Remaining length of the cpio based on data pointer - * @offset: When a matching file is found, this is the offset to the - *          beginning of the cpio. It can be used to iterate through - *          the cpio to find all files inside of a directory path + * @path:       The directory to search for, including a slash at the end + * @data:       Pointer to the the cpio archive or a header inside + * @len:        Remaining length of the cpio based on data pointer + * @nextoff:    When a matching file is found, this is the offset from the + *              beginning of the cpio to the beginning of the next file, not the + *              matching file itself. It can be used to iterate through the cpio + *              to find all files inside of a directory path.   * - * @return: struct cpio_data containing the address, length and - *          filename (with the directory path cut off) of the found file. - *          If you search for a filename and not for files in a directory, - *          pass the absolute path of the filename in the cpio and make sure - *          the match returned an empty filename string. + * @return:     struct cpio_data containing the address, length and + *              filename (with the directory path cut off) of the found file. + *              If you search for a filename and not for files in a directory, + *              pass the absolute path of the filename in the cpio and make sure + *              the match returned an empty filename string.   
*/  struct cpio_data find_cpio_data(const char *path, void *data, -					  size_t len,  long *offset) +				size_t len,  long *nextoff)  {  	const size_t cpio_header_len = 8*C_NFIELDS - 2;  	struct cpio_data cd = { NULL, 0, "" }; @@ -124,7 +125,7 @@ struct cpio_data find_cpio_data(const char *path, void *data,  		if ((ch[C_MODE] & 0170000) == 0100000 &&  		    ch[C_NAMESIZE] >= mypathsize &&  		    !memcmp(p, path, mypathsize)) { -			*offset = (long)nptr - (long)data; +			*nextoff = (long)nptr - (long)data;  			if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {  				pr_warn(  				"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n", diff --git a/lib/genalloc.c b/lib/genalloc.c index b35cfa9bc3d4..26cf20be72b7 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -37,6 +37,11 @@  #include <linux/of_address.h>  #include <linux/of_device.h> +static inline size_t chunk_size(const struct gen_pool_chunk *chunk) +{ +	return chunk->end_addr - chunk->start_addr + 1; +} +  static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)  {  	unsigned long val, nval; @@ -182,13 +187,13 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy  	int nbytes = sizeof(struct gen_pool_chunk) +  				BITS_TO_LONGS(nbits) * sizeof(long); -	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); +	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);  	if (unlikely(chunk == NULL))  		return -ENOMEM;  	chunk->phys_addr = phys;  	chunk->start_addr = virt; -	chunk->end_addr = virt + size; +	chunk->end_addr = virt + size - 1;  	atomic_set(&chunk->avail, size);  	spin_lock(&pool->lock); @@ -213,7 +218,7 @@ phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)  	rcu_read_lock();  	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { -		if (addr >= chunk->start_addr && addr < chunk->end_addr) { +		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {  			paddr = chunk->phys_addr + (addr - chunk->start_addr);  			break;  		} @@ -242,7 +247,7 @@ void gen_pool_destroy(struct gen_pool *pool)  		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);  		list_del(&chunk->next_chunk); -		end_bit = (chunk->end_addr - chunk->start_addr) >> order; +		end_bit = chunk_size(chunk) >> order;  		bit = find_next_bit(chunk->bits, end_bit, 0);  		BUG_ON(bit < end_bit); @@ -283,7 +288,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)  		if (size > atomic_read(&chunk->avail))  			continue; -		end_bit = (chunk->end_addr - chunk->start_addr) >> order; +		end_bit = chunk_size(chunk) >> order;  retry:  		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,  				pool->data); @@ -330,8 +335,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)  	nbits = (size + (1UL << order) - 1) >> order;  	rcu_read_lock();  	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { -		if (addr >= chunk->start_addr && addr < chunk->end_addr) { -			BUG_ON(addr + size > chunk->end_addr); +		if (addr >= chunk->start_addr && addr <= chunk->end_addr) { +			BUG_ON(addr + size - 1 > chunk->end_addr);  			start_bit = (addr - chunk->start_addr) >> order;  			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);  			BUG_ON(remain); @@ -400,7 +405,7 @@ size_t gen_pool_size(struct gen_pool *pool)  	rcu_read_lock();  	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) -		size += chunk->end_addr - chunk->start_addr; +		size += chunk_size(chunk);  	rcu_read_unlock();  	return size;  } @@ -519,7 +524,6 @@ struct gen_pool 
*devm_gen_pool_create(struct device *dev, int min_alloc_order,  /**   * dev_get_gen_pool - Obtain the gen_pool (if any) for a device   * @dev: device to retrieve the gen_pool from - * @name: Optional name for the gen_pool, usually NULL   *   * Returns the gen_pool for the device if one is present, or NULL.   */ diff --git a/lib/hexdump.c b/lib/hexdump.c index 3f0494c9d57a..8499c810909a 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -14,6 +14,8 @@  const char hex_asc[] = "0123456789abcdef";  EXPORT_SYMBOL(hex_asc); +const char hex_asc_upper[] = "0123456789ABCDEF"; +EXPORT_SYMBOL(hex_asc_upper);  /**   * hex_to_bin - convert a hex digit to its real value diff --git a/lib/kobject.c b/lib/kobject.c index 4a1f33d43548..669bf190d4fb 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -545,8 +545,8 @@ static void kobject_cleanup(struct kobject *kobj)  	struct kobj_type *t = get_ktype(kobj);  	const char *name = kobj->name; -	pr_debug("kobject: '%s' (%p): %s\n", -		 kobject_name(kobj), kobj, __func__); +	pr_debug("kobject: '%s' (%p): %s, parent %p\n", +		 kobject_name(kobj), kobj, __func__, kobj->parent);  	if (t && !t->release)  		pr_debug("kobject: '%s' (%p): does not have a release() " @@ -580,9 +580,25 @@ static void kobject_cleanup(struct kobject *kobj)  	}  } +#ifdef CONFIG_DEBUG_KOBJECT_RELEASE +static void kobject_delayed_cleanup(struct work_struct *work) +{ +	kobject_cleanup(container_of(to_delayed_work(work), +				     struct kobject, release)); +} +#endif +  static void kobject_release(struct kref *kref)  { -	kobject_cleanup(container_of(kref, struct kobject, kref)); +	struct kobject *kobj = container_of(kref, struct kobject, kref); +#ifdef CONFIG_DEBUG_KOBJECT_RELEASE +	pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n", +		 kobject_name(kobj), kobj, __func__, kobj->parent); +	INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup); +	schedule_delayed_work(&kobj->release, HZ); +#else +	kobject_cleanup(kobj); +#endif  }  /** @@ -915,6 +931,18 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)  	return kobj_child_ns_ops(kobj->parent);  } +bool kobj_ns_current_may_mount(enum kobj_ns_type type) +{ +	bool may_mount = true; + +	spin_lock(&kobj_ns_type_lock); +	if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && +	    kobj_ns_ops_tbl[type]) +		may_mount = kobj_ns_ops_tbl[type]->current_may_mount(); +	spin_unlock(&kobj_ns_type_lock); + +	return may_mount; +}  void *kobj_ns_grab_current(enum kobj_ns_type type)  { diff --git a/lib/lockref.c b/lib/lockref.c new file mode 100644 index 000000000000..6f9d434c1521 --- /dev/null +++ b/lib/lockref.c @@ -0,0 +1,183 @@ +#include <linux/export.h> +#include <linux/lockref.h> + +#ifdef CONFIG_CMPXCHG_LOCKREF + +/* + * Allow weakly-ordered memory architectures to provide barrier-less + * cmpxchg semantics for lockref updates. + */ +#ifndef cmpxchg64_relaxed +# define cmpxchg64_relaxed cmpxchg64 +#endif + +/* + * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP. + * This is useful for architectures with an expensive cpu_relax(). + */ +#ifndef arch_mutex_cpu_relax +# define arch_mutex_cpu_relax() cpu_relax() +#endif + +/* + * Note that the "cmpxchg()" reloads the "old" value for the + * failure case. 
+ */ +#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\ +	struct lockref old;							\ +	BUILD_BUG_ON(sizeof(old) != 8);						\ +	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\ +	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\ +		struct lockref new = old, prev = old;				\ +		CODE								\ +		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\ +						   old.lock_count,		\ +						   new.lock_count);		\ +		if (likely(old.lock_count == prev.lock_count)) {		\ +			SUCCESS;						\ +		}								\ +		arch_mutex_cpu_relax();						\ +	}									\ +} while (0) + +#else + +#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0) + +#endif + +/** + * lockref_get - Increments reference count unconditionally + * @lockref: pointer to lockref structure + * + * This operation is only valid if you already hold a reference + * to the object, so you know the count cannot be zero. + */ +void lockref_get(struct lockref *lockref) +{ +	CMPXCHG_LOOP( +		new.count++; +	, +		return; +	); + +	spin_lock(&lockref->lock); +	lockref->count++; +	spin_unlock(&lockref->lock); +} +EXPORT_SYMBOL(lockref_get); + +/** + * lockref_get_not_zero - Increments count unless the count is 0 + * @lockref: pointer to lockref structure + * Return: 1 if count updated successfully or 0 if count was zero + */ +int lockref_get_not_zero(struct lockref *lockref) +{ +	int retval; + +	CMPXCHG_LOOP( +		new.count++; +		if (!old.count) +			return 0; +	, +		return 1; +	); + +	spin_lock(&lockref->lock); +	retval = 0; +	if (lockref->count) { +		lockref->count++; +		retval = 1; +	} +	spin_unlock(&lockref->lock); +	return retval; +} +EXPORT_SYMBOL(lockref_get_not_zero); + +/** + * lockref_get_or_lock - Increments count unless the count is 0 + * @lockref: pointer to lockref structure + * Return: 1 if count updated successfully or 0 if count was zero + * and we got the lock instead. 
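As a usage sketch only (struct foo and its helpers are hypothetical, not part of this commit), lockref_get_not_zero() above is typically paired with lockref_put_or_lock() (below) like this:

	#include <linux/lockref.h>
	#include <linux/slab.h>

	struct foo {
		struct lockref ref;	/* spinlock + count in one 64-bit word */
		/* ... payload ... */
	};

	static struct foo *foo_get(struct foo *f)
	{
		/* take a reference only while the object is still live */
		return lockref_get_not_zero(&f->ref) ? f : NULL;
	}

	static void foo_put(struct foo *f)
	{
		/* fast path: drop the count without touching the spinlock */
		if (lockref_put_or_lock(&f->ref))
			return;
		/* count was <= 1 and we now hold f->ref.lock */
		f->ref.count = 0;
		spin_unlock(&f->ref.lock);
		kfree(f);
	}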
+ */ +int lockref_get_or_lock(struct lockref *lockref) +{ +	CMPXCHG_LOOP( +		new.count++; +		if (!old.count) +			break; +	, +		return 1; +	); + +	spin_lock(&lockref->lock); +	if (!lockref->count) +		return 0; +	lockref->count++; +	spin_unlock(&lockref->lock); +	return 1; +} +EXPORT_SYMBOL(lockref_get_or_lock); + +/** + * lockref_put_or_lock - decrements count unless count <= 1 before decrement + * @lockref: pointer to lockref structure + * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken + */ +int lockref_put_or_lock(struct lockref *lockref) +{ +	CMPXCHG_LOOP( +		new.count--; +		if (old.count <= 1) +			break; +	, +		return 1; +	); + +	spin_lock(&lockref->lock); +	if (lockref->count <= 1) +		return 0; +	lockref->count--; +	spin_unlock(&lockref->lock); +	return 1; +} +EXPORT_SYMBOL(lockref_put_or_lock); + +/** + * lockref_mark_dead - mark lockref dead + * @lockref: pointer to lockref structure + */ +void lockref_mark_dead(struct lockref *lockref) +{ +	assert_spin_locked(&lockref->lock); +	lockref->count = -128; +} + +/** + * lockref_get_not_dead - Increments count unless the ref is dead + * @lockref: pointer to lockref structure + * Return: 1 if count updated successfully or 0 if lockref was dead + */ +int lockref_get_not_dead(struct lockref *lockref) +{ +	int retval; + +	CMPXCHG_LOOP( +		new.count++; +		if ((int)old.count < 0) +			return 0; +	, +		return 1; +	); + +	spin_lock(&lockref->lock); +	retval = 0; +	if ((int) lockref->count >= 0) { +		lockref->count++; +		retval = 1; +	} +	spin_unlock(&lockref->lock); +	return retval; +} +EXPORT_SYMBOL(lockref_get_not_dead); diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 411be80ddb46..df6839e3ce08 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -283,8 +283,8 @@ _output_error:  	return (int) (-(((char *) ip) - source));  } -int lz4_decompress(const char *src, size_t *src_len, char *dest, -		size_t actual_dest_len) +int lz4_decompress(const unsigned char *src, size_t *src_len, +		unsigned char *dest, size_t actual_dest_len)  {  	int ret = -1;  	int input_len = 0; @@ -302,8 +302,8 @@ exit_0:  EXPORT_SYMBOL(lz4_decompress);  #endif -int lz4_decompress_unknownoutputsize(const char *src, size_t src_len, -		char *dest, size_t *dest_len) +int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, +		unsigned char *dest, size_t *dest_len)  {  	int ret = -1;  	int out_len = 0; diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c new file mode 100644 index 000000000000..bab1ba2a4c71 --- /dev/null +++ b/lib/percpu_ida.c @@ -0,0 +1,335 @@ +/* + * Percpu IDA library + * + * Copyright (C) 2013 Datera, Inc. Kent Overstreet + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. 
+ */ + +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/err.h> +#include <linux/export.h> +#include <linux/hardirq.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/percpu.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/spinlock.h> +#include <linux/percpu_ida.h> + +/* + * Number of tags we move between the percpu freelist and the global freelist at + * a time + */ +#define IDA_PCPU_BATCH_MOVE	32U + +/* Max size of percpu freelist, */ +#define IDA_PCPU_SIZE		((IDA_PCPU_BATCH_MOVE * 3) / 2) + +struct percpu_ida_cpu { +	/* +	 * Even though this is percpu, we need a lock for tag stealing by remote +	 * CPUs: +	 */ +	spinlock_t			lock; + +	/* nr_free/freelist form a stack of free IDs */ +	unsigned			nr_free; +	unsigned			freelist[]; +}; + +static inline void move_tags(unsigned *dst, unsigned *dst_nr, +			     unsigned *src, unsigned *src_nr, +			     unsigned nr) +{ +	*src_nr -= nr; +	memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr); +	*dst_nr += nr; +} + +/* + * Try to steal tags from a remote cpu's percpu freelist. + * + * We first check how many percpu freelists have tags - we don't steal tags + * unless enough percpu freelists have tags on them that it's possible more than + * half the total tags could be stuck on remote percpu freelists. + * + * Then we iterate through the cpus until we find some tags - we don't attempt + * to find the "best" cpu to steal from, to keep cacheline bouncing to a + * minimum. + */ +static inline void steal_tags(struct percpu_ida *pool, +			      struct percpu_ida_cpu *tags) +{ +	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen; +	struct percpu_ida_cpu *remote; + +	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); +	     cpus_have_tags * IDA_PCPU_SIZE > pool->nr_tags / 2; +	     cpus_have_tags--) { +		cpu = cpumask_next(cpu, &pool->cpus_have_tags); + +		if (cpu >= nr_cpu_ids) { +			cpu = cpumask_first(&pool->cpus_have_tags); +			if (cpu >= nr_cpu_ids) +				BUG(); +		} + +		pool->cpu_last_stolen = cpu; +		remote = per_cpu_ptr(pool->tag_cpu, cpu); + +		cpumask_clear_cpu(cpu, &pool->cpus_have_tags); + +		if (remote == tags) +			continue; + +		spin_lock(&remote->lock); + +		if (remote->nr_free) { +			memcpy(tags->freelist, +			       remote->freelist, +			       sizeof(unsigned) * remote->nr_free); + +			tags->nr_free = remote->nr_free; +			remote->nr_free = 0; +		} + +		spin_unlock(&remote->lock); + +		if (tags->nr_free) +			break; +	} +} + +/* + * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto + * our percpu freelist: + */ +static inline void alloc_global_tags(struct percpu_ida *pool, +				     struct percpu_ida_cpu *tags) +{ +	move_tags(tags->freelist, &tags->nr_free, +		  pool->freelist, &pool->nr_free, +		  min(pool->nr_free, IDA_PCPU_BATCH_MOVE)); +} + +static inline unsigned alloc_local_tag(struct percpu_ida *pool, +				       struct percpu_ida_cpu *tags) +{ +	int tag = -ENOSPC; + +	spin_lock(&tags->lock); +	if (tags->nr_free) +		tag = tags->freelist[--tags->nr_free]; +	spin_unlock(&tags->lock); + +	return tag; +} + +/** + * percpu_ida_alloc - allocate a tag + * @pool: pool to allocate from + * @gfp: gfp flags + * + * Returns a tag - an integer in the range [0..nr_tags) (passed to + * tag_pool_init()), or otherwise -ENOSPC on allocation failure. + * + * Safe to be called from interrupt context (assuming it isn't passed + * __GFP_WAIT, of course). 
+ * + * @gfp indicates whether or not to wait until a free id is available (it's not + * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep + * however long it takes until another thread frees an id (same semantics as a + * mempool). + * + * Will not fail if passed __GFP_WAIT. + */ +int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp) +{ +	DEFINE_WAIT(wait); +	struct percpu_ida_cpu *tags; +	unsigned long flags; +	int tag; + +	local_irq_save(flags); +	tags = this_cpu_ptr(pool->tag_cpu); + +	/* Fastpath */ +	tag = alloc_local_tag(pool, tags); +	if (likely(tag >= 0)) { +		local_irq_restore(flags); +		return tag; +	} + +	while (1) { +		spin_lock(&pool->lock); + +		/* +		 * prepare_to_wait() must come before steal_tags(), in case +		 * percpu_ida_free() on another cpu flips a bit in +		 * cpus_have_tags +		 * +		 * global lock held and irqs disabled, don't need percpu lock +		 */ +		prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE); + +		if (!tags->nr_free) +			alloc_global_tags(pool, tags); +		if (!tags->nr_free) +			steal_tags(pool, tags); + +		if (tags->nr_free) { +			tag = tags->freelist[--tags->nr_free]; +			if (tags->nr_free) +				cpumask_set_cpu(smp_processor_id(), +						&pool->cpus_have_tags); +		} + +		spin_unlock(&pool->lock); +		local_irq_restore(flags); + +		if (tag >= 0 || !(gfp & __GFP_WAIT)) +			break; + +		schedule(); + +		local_irq_save(flags); +		tags = this_cpu_ptr(pool->tag_cpu); +	} + +	finish_wait(&pool->wait, &wait); +	return tag; +} +EXPORT_SYMBOL_GPL(percpu_ida_alloc); + +/** + * percpu_ida_free - free a tag + * @pool: pool @tag was allocated from + * @tag: a tag previously allocated with percpu_ida_alloc() + * + * Safe to be called from interrupt context. + */ +void percpu_ida_free(struct percpu_ida *pool, unsigned tag) +{ +	struct percpu_ida_cpu *tags; +	unsigned long flags; +	unsigned nr_free; + +	BUG_ON(tag >= pool->nr_tags); + +	local_irq_save(flags); +	tags = this_cpu_ptr(pool->tag_cpu); + +	spin_lock(&tags->lock); +	tags->freelist[tags->nr_free++] = tag; + +	nr_free = tags->nr_free; +	spin_unlock(&tags->lock); + +	if (nr_free == 1) { +		cpumask_set_cpu(smp_processor_id(), +				&pool->cpus_have_tags); +		wake_up(&pool->wait); +	} + +	if (nr_free == IDA_PCPU_SIZE) { +		spin_lock(&pool->lock); + +		/* +		 * Global lock held and irqs disabled, don't need percpu +		 * lock +		 */ +		if (tags->nr_free == IDA_PCPU_SIZE) { +			move_tags(pool->freelist, &pool->nr_free, +				  tags->freelist, &tags->nr_free, +				  IDA_PCPU_BATCH_MOVE); + +			wake_up(&pool->wait); +		} +		spin_unlock(&pool->lock); +	} + +	local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(percpu_ida_free); + +/** + * percpu_ida_destroy - release a tag pool's resources + * @pool: pool to free + * + * Frees the resources allocated by percpu_ida_init(). + */ +void percpu_ida_destroy(struct percpu_ida *pool) +{ +	free_percpu(pool->tag_cpu); +	free_pages((unsigned long) pool->freelist, +		   get_order(pool->nr_tags * sizeof(unsigned))); +} +EXPORT_SYMBOL_GPL(percpu_ida_destroy); + +/** + * percpu_ida_init - initialize a percpu tag pool + * @pool: pool to initialize + * @nr_tags: number of tags that will be available for allocation + * + * Initializes @pool so that it can be used to allocate tags - integers in the + * range [0, nr_tags). Typically, they'll be used by driver code to refer to a + * preallocated array of tag structures. 
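A minimal user of the pool might look like the following sketch; the my_* names and the 128-tag sizing are illustrative only:

	#include <linux/percpu_ida.h>

	struct my_cmd {
		int busy;	/* per-command state, indexed by tag */
	};

	static struct percpu_ida my_tags;
	static struct my_cmd my_cmds[128];	/* one preallocated object per tag */

	static int my_setup(void)
	{
		return percpu_ida_init(&my_tags, ARRAY_SIZE(my_cmds));
	}

	static struct my_cmd *my_cmd_get(void)
	{
		int tag = percpu_ida_alloc(&my_tags, GFP_KERNEL);	/* may sleep */

		return tag < 0 ? NULL : &my_cmds[tag];
	}

	static void my_cmd_put(struct my_cmd *cmd)
	{
		percpu_ida_free(&my_tags, cmd - my_cmds);
	}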
+ * + * Allocation is percpu, but sharding is limited by nr_tags - for best + * performance, the workload should not span more cpus than nr_tags / 128. + */ +int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) +{ +	unsigned i, cpu, order; + +	memset(pool, 0, sizeof(*pool)); + +	init_waitqueue_head(&pool->wait); +	spin_lock_init(&pool->lock); +	pool->nr_tags = nr_tags; + +	/* Guard against overflow */ +	if (nr_tags > (unsigned) INT_MAX + 1) { +		pr_err("percpu_ida_init(): nr_tags too large\n"); +		return -EINVAL; +	} + +	order = get_order(nr_tags * sizeof(unsigned)); +	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order); +	if (!pool->freelist) +		return -ENOMEM; + +	for (i = 0; i < nr_tags; i++) +		pool->freelist[i] = i; + +	pool->nr_free = nr_tags; + +	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) + +				       IDA_PCPU_SIZE * sizeof(unsigned), +				       sizeof(unsigned)); +	if (!pool->tag_cpu) +		goto err; + +	for_each_possible_cpu(cpu) +		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock); + +	return 0; +err: +	percpu_ida_destroy(pool); +	return -ENOMEM; +} +EXPORT_SYMBOL_GPL(percpu_ida_init); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index e7964296fd50..7811ed3b4e70 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -32,6 +32,7 @@  #include <linux/string.h>  #include <linux/bitops.h>  #include <linux/rcupdate.h> +#include <linux/hardirq.h>		/* in_interrupt() */  #ifdef __KERNEL__ @@ -207,7 +208,12 @@ radix_tree_node_alloc(struct radix_tree_root *root)  	struct radix_tree_node *ret = NULL;  	gfp_t gfp_mask = root_gfp_mask(root); -	if (!(gfp_mask & __GFP_WAIT)) { +	/* +	 * Preload code isn't irq safe and it doesn't make sence to use +	 * preloading in the interrupt anyway as all the allocations have to +	 * be atomic. So just do normal allocation when in interrupt. +	 */ +	if (!(gfp_mask & __GFP_WAIT) && !in_interrupt()) {  		struct radix_tree_preload *rtp;  		/* @@ -264,7 +270,7 @@ radix_tree_node_free(struct radix_tree_node *node)   * To make use of this facility, the radix tree must be initialised without   * __GFP_WAIT being passed to INIT_RADIX_TREE().   */ -int radix_tree_preload(gfp_t gfp_mask) +static int __radix_tree_preload(gfp_t gfp_mask)  {  	struct radix_tree_preload *rtp;  	struct radix_tree_node *node; @@ -288,9 +294,40 @@ int radix_tree_preload(gfp_t gfp_mask)  out:  	return ret;  } + +/* + * Load up this CPU's radix_tree_node buffer with sufficient objects to + * ensure that the addition of a single element in the tree cannot fail.  On + * success, return zero, with preemption disabled.  On error, return -ENOMEM + * with preemption not disabled. + * + * To make use of this facility, the radix tree must be initialised without + * __GFP_WAIT being passed to INIT_RADIX_TREE(). + */ +int radix_tree_preload(gfp_t gfp_mask) +{ +	/* Warn on non-sensical use... */ +	WARN_ON_ONCE(!(gfp_mask & __GFP_WAIT)); +	return __radix_tree_preload(gfp_mask); +}  EXPORT_SYMBOL(radix_tree_preload);  /* + * The same as above function, except we don't guarantee preloading happens. + * We do it, if we decide it helps. On success, return zero with preemption + * disabled. On error, return -ENOMEM with preemption not disabled. 
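The calling pattern both preload variants support looks like this sketch (the tree, lock and my_insert() wrapper are hypothetical):

	#include <linux/radix-tree.h>

	static int my_insert(struct radix_tree_root *tree, spinlock_t *lock,
			     unsigned long index, void *item, gfp_t gfp)
	{
		/* preallocate nodes while sleeping is still allowed ... */
		int err = radix_tree_maybe_preload(gfp);

		if (err)
			return err;

		/* ... then insert with preemption disabled, under the lock */
		spin_lock(lock);
		err = radix_tree_insert(tree, index, item);
		spin_unlock(lock);

		radix_tree_preload_end();	/* re-enables preemption */
		return err;
	}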
+ */ +int radix_tree_maybe_preload(gfp_t gfp_mask) +{ +	if (gfp_mask & __GFP_WAIT) +		return __radix_tree_preload(gfp_mask); +	/* Preloading doesn't help anything with this gfp mask, skip it */ +	preempt_disable(); +	return 0; +} +EXPORT_SYMBOL(radix_tree_maybe_preload); + +/*   *	Return the maximum key which can be store into a   *	radix tree with height HEIGHT.   */ diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore index 162becacf97c..0a7e494b2bcd 100644 --- a/lib/raid6/.gitignore +++ b/lib/raid6/.gitignore @@ -2,3 +2,4 @@ mktables  altivec*.c  int*.c  tables.c +neon?.c diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 9f7c184725d7..c7dab0645554 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -5,6 +5,8 @@ raid6_pq-y	+= algos.o recov.o tables.o int1.o int2.o int4.o \  raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o  raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o +raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o +raid6_pq-$(CONFIG_TILEGX) += tilegx8.o  hostprogs-y	+= mktables @@ -16,6 +18,21 @@ ifeq ($(CONFIG_ALTIVEC),y)  altivec_flags := -maltivec -mabi=altivec  endif +# The GCC option -ffreestanding is required in order to compile code containing +# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel) +ifeq ($(CONFIG_KERNEL_MODE_NEON),y) +NEON_FLAGS := -ffreestanding +ifeq ($(ARCH),arm) +NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon +endif +ifeq ($(ARCH),arm64) +CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only +CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only +CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only +CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only +endif +endif +  targets += int1.c  $(obj)/int1.c:   UNROLL := 1  $(obj)/int1.c:   $(src)/int.uc $(src)/unroll.awk FORCE @@ -70,6 +87,35 @@ $(obj)/altivec8.c:   UNROLL := 8  $(obj)/altivec8.c:   $(src)/altivec.uc $(src)/unroll.awk FORCE  	$(call if_changed,unroll) +CFLAGS_neon1.o += $(NEON_FLAGS) +targets += neon1.c +$(obj)/neon1.c:   UNROLL := 1 +$(obj)/neon1.c:   $(src)/neon.uc $(src)/unroll.awk FORCE +	$(call if_changed,unroll) + +CFLAGS_neon2.o += $(NEON_FLAGS) +targets += neon2.c +$(obj)/neon2.c:   UNROLL := 2 +$(obj)/neon2.c:   $(src)/neon.uc $(src)/unroll.awk FORCE +	$(call if_changed,unroll) + +CFLAGS_neon4.o += $(NEON_FLAGS) +targets += neon4.c +$(obj)/neon4.c:   UNROLL := 4 +$(obj)/neon4.c:   $(src)/neon.uc $(src)/unroll.awk FORCE +	$(call if_changed,unroll) + +CFLAGS_neon8.o += $(NEON_FLAGS) +targets += neon8.c +$(obj)/neon8.c:   UNROLL := 8 +$(obj)/neon8.c:   $(src)/neon.uc $(src)/unroll.awk FORCE +	$(call if_changed,unroll) + +targets += tilegx8.c +$(obj)/tilegx8.c:   UNROLL := 8 +$(obj)/tilegx8.c:   $(src)/tilegx.uc $(src)/unroll.awk FORCE +	$(call if_changed,unroll) +  quiet_cmd_mktable = TABLE   $@        cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 6d7316fe9f30..f0b1aa3586d1 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c @@ -66,10 +66,19 @@ const struct raid6_calls * const raid6_algos[] = {  	&raid6_altivec4,  	&raid6_altivec8,  #endif +#if defined(CONFIG_TILEGX) +	&raid6_tilegx8, +#endif  	&raid6_intx1,  	&raid6_intx2,  	&raid6_intx4,  	&raid6_intx8, +#ifdef CONFIG_KERNEL_MODE_NEON +	&raid6_neonx1, +	&raid6_neonx2, +	&raid6_neonx4, +	&raid6_neonx8, +#endif  	NULL  }; diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c new file mode 100644 index 000000000000..36ad4705df1a --- /dev/null +++ b/lib/raid6/neon.c @@ -0,0 +1,58 
@@ +/* + * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics + * + * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/raid/pq.h> + +#ifdef __KERNEL__ +#include <asm/neon.h> +#else +#define kernel_neon_begin() +#define kernel_neon_end() +#define cpu_has_neon()		(1) +#endif + +/* + * There are 2 reasons these wrappers are kept in a separate compilation unit + * from the actual implementations in neonN.c (generated from neon.uc by + * unroll.awk): + * - the actual implementations use NEON intrinsics, and the GCC support header + *   (arm_neon.h) is not fully compatible (type wise) with the kernel; + * - the neonN.c files are compiled with -mfpu=neon and optimization enabled, + *   and we have to make sure that we never use *any* NEON/VFP instructions + *   outside a kernel_neon_begin()/kernel_neon_end() pair. + */ + +#define RAID6_NEON_WRAPPER(_n)						\ +	static void raid6_neon ## _n ## _gen_syndrome(int disks,	\ +					size_t bytes, void **ptrs)	\ +	{								\ +		void raid6_neon ## _n  ## _gen_syndrome_real(int,	\ +						unsigned long, void**);	\ +		kernel_neon_begin();					\ +		raid6_neon ## _n ## _gen_syndrome_real(disks,		\ +					(unsigned long)bytes, ptrs);	\ +		kernel_neon_end();					\ +	}								\ +	struct raid6_calls const raid6_neonx ## _n = {			\ +		raid6_neon ## _n ## _gen_syndrome,			\ +		raid6_have_neon,					\ +		"neonx" #_n,						\ +		0							\ +	} + +static int raid6_have_neon(void) +{ +	return cpu_has_neon(); +} + +RAID6_NEON_WRAPPER(1); +RAID6_NEON_WRAPPER(2); +RAID6_NEON_WRAPPER(4); +RAID6_NEON_WRAPPER(8); diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc new file mode 100644 index 000000000000..1b9ed793342d --- /dev/null +++ b/lib/raid6/neon.uc @@ -0,0 +1,80 @@ +/* ----------------------------------------------------------------------- + * + *   neon.uc - RAID-6 syndrome calculation using ARM NEON instructions + * + *   Copyright (C) 2012 Rob Herring + * + *   Based on altivec.uc: + *     Copyright 2002-2004 H. Peter Anvin - All Rights Reserved + * + *   This program is free software; you can redistribute it and/or modify + *   it under the terms of the GNU General Public License as published by + *   the Free Software Foundation, Inc., 53 Temple Place Ste 330, + *   Boston MA 02111-1307, USA; either version 2 of the License, or + *   (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +/* + * neon$#.c + * + * $#-way unrolled NEON intrinsics math RAID-6 instruction set + * + * This file is postprocessed using unroll.awk + */ + +#include <arm_neon.h> + +typedef uint8x16_t unative_t; + +#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x}) +#define NSIZE	sizeof(unative_t) + +/* + * The SHLBYTE() operation shifts each byte left by 1, *not* + * rolling over into the next byte + */ +static inline unative_t SHLBYTE(unative_t v) +{ +	return vshlq_n_u8(v, 1); +} + +/* + * The MASK() operation returns 0xFF in any byte for which the high + * bit is 1, 0x00 for any byte for which the high bit is 0. 
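For reference, SHLBYTE() and MASK() together implement the per-byte multiply-by-2 in GF(2^8) over the RAID-6 polynomial 0x11d; an equivalent scalar sketch (not part of the patch):

	static inline uint8_t gf_mul2(uint8_t v)
	{
		/* shift left, then reduce by 0x1d if the top bit fell out */
		return (v << 1) ^ ((v & 0x80) ? 0x1d : 0);
	}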
+ */ +static inline unative_t MASK(unative_t v) +{ +	const uint8x16_t temp = NBYTES(0); +	return (unative_t)vcltq_s8((int8x16_t)v, (int8x16_t)temp); +} + +void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs) +{ +	uint8_t **dptr = (uint8_t **)ptrs; +	uint8_t *p, *q; +	int d, z, z0; + +	register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; +	const unative_t x1d = NBYTES(0x1d); + +	z0 = disks - 3;		/* Highest data disk */ +	p = dptr[z0+1];		/* XOR parity */ +	q = dptr[z0+2];		/* RS syndrome */ + +	for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { +		wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]); +		for ( z = z0-1 ; z >= 0 ; z-- ) { +			wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]); +			wp$$ = veorq_u8(wp$$, wd$$); +			w2$$ = MASK(wq$$); +			w1$$ = SHLBYTE(wq$$); + +			w2$$ = vandq_u8(w2$$, x1d); +			w1$$ = veorq_u8(w1$$, w2$$); +			wq$$ = veorq_u8(w1$$, wd$$); +		} +		vst1q_u8(&p[d+NSIZE*$$], wp$$); +		vst1q_u8(&q[d+NSIZE*$$], wq$$); +	} +} diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile index 087332dbf8aa..29090f3db677 100644 --- a/lib/raid6/test/Makefile +++ b/lib/raid6/test/Makefile @@ -22,19 +22,34 @@ ifeq ($(ARCH),x86_64)          IS_X86 = yes  endif +ifeq ($(ARCH),arm) +        CFLAGS += -I../../../arch/arm/include -mfpu=neon +        HAS_NEON = yes +endif +ifeq ($(ARCH),arm64) +        CFLAGS += -I../../../arch/arm64/include +        HAS_NEON = yes +endif +  ifeq ($(IS_X86),yes)          OBJS   += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o          CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" |	\                      gcc -c -x assembler - >&/dev/null &&	\                      rm ./-.o && echo -DCONFIG_AS_AVX2=1) +else ifeq ($(HAS_NEON),yes) +        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o +        CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1  else -        HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\ +        HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\                           gcc -c -x c - >&/dev/null && \                           rm ./-.o && echo yes)          ifeq ($(HAS_ALTIVEC),yes)                  OBJS += altivec1.o altivec2.o altivec4.o altivec8.o          endif  endif +ifeq ($(ARCH),tilegx) +OBJS += tilegx8.o +endif  .c.o:  	$(CC) $(CFLAGS) -c -o $@ $< @@ -55,6 +70,18 @@ raid6.a: $(OBJS)  raid6test: test.c raid6.a  	$(CC) $(CFLAGS) -o raid6test $^ +neon1.c: neon.uc ../unroll.awk +	$(AWK) ../unroll.awk -vN=1 < neon.uc > $@ + +neon2.c: neon.uc ../unroll.awk +	$(AWK) ../unroll.awk -vN=2 < neon.uc > $@ + +neon4.c: neon.uc ../unroll.awk +	$(AWK) ../unroll.awk -vN=4 < neon.uc > $@ + +neon8.c: neon.uc ../unroll.awk +	$(AWK) ../unroll.awk -vN=8 < neon.uc > $@ +  altivec1.c: altivec.uc ../unroll.awk  	$(AWK) ../unroll.awk -vN=1 < altivec.uc > $@ @@ -85,11 +112,15 @@ int16.c: int.uc ../unroll.awk  int32.c: int.uc ../unroll.awk  	$(AWK) ../unroll.awk -vN=32 < int.uc > $@ +tilegx8.c: tilegx.uc ../unroll.awk +	$(AWK) ../unroll.awk -vN=8 < tilegx.uc > $@ +  tables.c: mktables  	./mktables > tables.c  clean: -	rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c tables.c raid6test +	rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test +	rm -f tilegx*.c  spotless: clean  	rm -f *~ diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc new file mode 100644 index 000000000000..e7c29459cbcd --- /dev/null +++ b/lib/raid6/tilegx.uc @@ -0,0 +1,86 @@ +/* -*- linux-c -*- ------------------------------------------------------- * + * + *   Copyright 2002 H. 
Peter Anvin - All Rights Reserved + *   Copyright 2012 Tilera Corporation - All Rights Reserved + * + *   This program is free software; you can redistribute it and/or modify + *   it under the terms of the GNU General Public License as published by + *   the Free Software Foundation, Inc., 53 Temple Place Ste 330, + *   Boston MA 02111-1307, USA; either version 2 of the License, or + *   (at your option) any later version; incorporated herein by reference. + * + * ----------------------------------------------------------------------- */ + +/* + * tilegx$#.c + * + * $#-way unrolled TILE-Gx SIMD for RAID-6 math. + * + * This file is postprocessed using unroll.awk. + * + */ + +#include <linux/raid/pq.h> + +/* Create 8 byte copies of constant byte */ +# define NBYTES(x) (__insn_v1addi(0, x)) +# define NSIZE  8 + +/* + * The SHLBYTE() operation shifts each byte left by 1, *not* + * rolling over into the next byte + */ +static inline __attribute_const__ u64 SHLBYTE(u64 v) +{ +	/* Vector One Byte Shift Left Immediate. */ +	return __insn_v1shli(v, 1); +} + +/* + * The MASK() operation returns 0xFF in any byte for which the high + * bit is 1, 0x00 for any byte for which the high bit is 0. + */ +static inline __attribute_const__ u64 MASK(u64 v) +{ +	/* Vector One Byte Shift Right Signed Immediate. */ +	return __insn_v1shrsi(v, 7); +} + + +void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs) +{ +	u8 **dptr = (u8 **)ptrs; +	u64 *p, *q; +	int d, z, z0; + +	u64 wd$$, wq$$, wp$$, w1$$, w2$$; +	u64 x1d = NBYTES(0x1d); +	u64 * z0ptr; + +	z0 = disks - 3;			/* Highest data disk */ +	p = (u64 *)dptr[z0+1];	/* XOR parity */ +	q = (u64 *)dptr[z0+2];	/* RS syndrome */ + +	z0ptr = (u64 *)&dptr[z0][0]; +	for ( d = 0 ; d < bytes ; d += NSIZE*$# ) { +		wq$$ = wp$$ = *z0ptr++; +		for ( z = z0-1 ; z >= 0 ; z-- ) { +			wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE]; +			wp$$ = wp$$ ^ wd$$; +			w2$$ = MASK(wq$$); +			w1$$ = SHLBYTE(wq$$); +			w2$$ = w2$$ & x1d; +			w1$$ = w1$$ ^ w2$$; +			wq$$ = w1$$ ^ wd$$; +		} +		*p++ = wp$$; +		*q++ = wq$$; +	} +} + +const struct raid6_calls raid6_tilegx$# = { +	raid6_tilegx$#_gen_syndrome, +	NULL, +	"tilegx$#", +	0 +}; diff --git a/lib/rbtree.c b/lib/rbtree.c index c0e31fe2fabf..65f4effd117f 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -518,3 +518,43 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,  	*new = *victim;  }  EXPORT_SYMBOL(rb_replace_node); + +static struct rb_node *rb_left_deepest_node(const struct rb_node *node) +{ +	for (;;) { +		if (node->rb_left) +			node = node->rb_left; +		else if (node->rb_right) +			node = node->rb_right; +		else +			return (struct rb_node *)node; +	} +} + +struct rb_node *rb_next_postorder(const struct rb_node *node) +{ +	const struct rb_node *parent; +	if (!node) +		return NULL; +	parent = rb_parent(node); + +	/* If we're sitting on node, we've already seen our children */ +	if (parent && node == parent->rb_left && parent->rb_right) { +		/* If we are the parent's left node, go to the parent's right +		 * node then all the way down to the left */ +		return rb_left_deepest_node(parent->rb_right); +	} else +		/* Otherwise we are the parent's right node, and the parent +		 * should be next */ +		return (struct rb_node *)parent; +} +EXPORT_SYMBOL(rb_next_postorder); + +struct rb_node *rb_first_postorder(const struct rb_root *root) +{ +	if (!root->rb_node) +		return NULL; + +	return rb_left_deepest_node(root->rb_node); +} +EXPORT_SYMBOL(rb_first_postorder); diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 
122f02f9941b..31dd4ccd3baa 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c @@ -114,6 +114,16 @@ static int black_path_count(struct rb_node *rb)  	return count;  } +static void check_postorder(int nr_nodes) +{ +	struct rb_node *rb; +	int count = 0; +	for (rb = rb_first_postorder(&root); rb; rb = rb_next_postorder(rb)) +		count++; + +	WARN_ON_ONCE(count != nr_nodes); +} +  static void check(int nr_nodes)  {  	struct rb_node *rb; @@ -136,6 +146,8 @@ static void check(int nr_nodes)  	WARN_ON_ONCE(count != nr_nodes);  	WARN_ON_ONCE(count < (1 << black_path_count(rb_last(&root))) - 1); + +	check_postorder(nr_nodes);  }  static void check_augmented(int nr_nodes) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index d23762e6652c..4e8686c7e5a4 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -870,13 +870,13 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,  				swiotlb_full(hwdev, sg->length, dir, 0);  				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,  						       attrs); -				sgl[0].dma_length = 0; +				sg_dma_len(sgl) = 0;  				return 0;  			}  			sg->dma_address = phys_to_dma(hwdev, map);  		} else  			sg->dma_address = dev_addr; -		sg->dma_length = sg->length; +		sg_dma_len(sg) = sg->length;  	}  	return nelems;  } @@ -904,7 +904,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,  	BUG_ON(dir == DMA_NONE);  	for_each_sg(sgl, sg, nelems, i) -		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir); +		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);  }  EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); @@ -934,7 +934,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,  	for_each_sg(sgl, sg, nelems, i)  		swiotlb_sync_single(hwdev, sg->dma_address, -				    sg->dma_length, dir, target); +				    sg_dma_len(sg), dir, target);  }  void diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 739a36366b79..26559bdb4c49 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -26,6 +26,7 @@  #include <linux/math64.h>  #include <linux/uaccess.h>  #include <linux/ioport.h> +#include <linux/dcache.h>  #include <net/addrconf.h>  #include <asm/page.h>		/* for PAGE_SIZE */ @@ -532,6 +533,81 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)  	return buf;  } +static void widen(char *buf, char *end, unsigned len, unsigned spaces) +{ +	size_t size; +	if (buf >= end)	/* nowhere to put anything */ +		return; +	size = end - buf; +	if (size <= spaces) { +		memset(buf, ' ', size); +		return; +	} +	if (len) { +		if (len > size - spaces) +			len = size - spaces; +		memmove(buf + spaces, buf, len); +	} +	memset(buf, ' ', spaces); +} + +static noinline_for_stack +char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec, +		  const char *fmt) +{ +	const char *array[4], *s; +	const struct dentry *p; +	int depth; +	int i, n; + +	switch (fmt[1]) { +		case '2': case '3': case '4': +			depth = fmt[1] - '0'; +			break; +		default: +			depth = 1; +	} + +	rcu_read_lock(); +	for (i = 0; i < depth; i++, d = p) { +		p = ACCESS_ONCE(d->d_parent); +		array[i] = ACCESS_ONCE(d->d_name.name); +		if (p == d) { +			if (i) +				array[i] = ""; +			i++; +			break; +		} +	} +	s = array[--i]; +	for (n = 0; n != spec.precision; n++, buf++) { +		char c = *s++; +		if (!c) { +			if (!i) +				break; +			c = '/'; +			s = array[--i]; +		} +		if (buf < end) +			*buf = c; +	} +	rcu_read_unlock(); +	if (n < spec.field_width) { +		/* we want to pad the sucker */ +		unsigned spaces = spec.field_width - n; +		if (!(spec.flags & 
LEFT)) { +			widen(buf - n, end, n, spaces); +			return buf + spaces; +		} +		while (spaces--) { +			if (buf < end) +				*buf = ' '; +			++buf; +		} +	} +	return buf; +} +  static noinline_for_stack  char *symbol_string(char *buf, char *end, void *ptr,  		    struct printf_spec spec, const char *fmt) @@ -1253,6 +1329,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,  		spec.base = 16;  		return number(buf, end,  			      (unsigned long long) *((phys_addr_t *)ptr), spec); +	case 'd': +		return dentry_name(buf, end, ptr, spec, fmt); +	case 'D': +		return dentry_name(buf, end, +				   ((const struct file *)ptr)->f_path.dentry, +				   spec, fmt);  	}  	spec.flags |= SMALL;  	if (spec.field_width == -1) { | 
