Diffstat (limited to 'include/linux')
177 files changed, 2917 insertions, 1583 deletions
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 75e582b8d2d9..4c328fef403c 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -36,7 +36,7 @@ struct dma_chan_ref { /** * async_tx_flags - modifiers for the async_* calls * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the - * the destination address is not a source. The asynchronous case handles this + * destination address is not a source. The asynchronous case handles this * implicitly, the synchronous case needs to zero the destination block. * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address is * also one of the source addresses. In the synchronous case the destination diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 56e4580d4f55..614653e07e3a 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -14,113 +14,336 @@ #include <linux/mutex.h> #include <linux/notifier.h> -/* Notes on locking: - * - * backlight_device->ops_lock is an internal backlight lock protecting the - * ops pointer and no code outside the core should need to touch it. - * - * Access to update_status() is serialised by the update_lock mutex since - * most drivers seem to need this and historically get it wrong. - * - * Most drivers don't need locking on their get_brightness() method. - * If yours does, you need to implement it in the driver. You can use the - * update_lock mutex if appropriate. +/** + * enum backlight_update_reason - what method was used to update backlight * - * Any other use of the locks below is probably wrong. + * A driver indicates the method (reason) used for updating the backlight + * when calling backlight_force_update(). */ - enum backlight_update_reason { + /** + * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key. + */ BACKLIGHT_UPDATE_HOTKEY, + + /** + * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs. + */ BACKLIGHT_UPDATE_SYSFS, }; +/** + * enum backlight_type - the type of backlight control + * + * The type of interface used to control the backlight. + */ enum backlight_type { + /** + * @BACKLIGHT_RAW: + * + * The backlight is controlled using hardware registers. + */ BACKLIGHT_RAW = 1, + + /** + * @BACKLIGHT_PLATFORM: + * + * The backlight is controlled using a platform-specific interface. + */ BACKLIGHT_PLATFORM, + + /** + * @BACKLIGHT_FIRMWARE: + * + * The backlight is controlled using a standard firmware interface. + */ BACKLIGHT_FIRMWARE, + + /** + * @BACKLIGHT_TYPE_MAX: Number of entries. + */ BACKLIGHT_TYPE_MAX, }; +/** + * enum backlight_notification - the type of notification + * + * The type of notification sent to receivers that registered for + * notifications using backlight_register_notifier(). + */ enum backlight_notification { + /** + * @BACKLIGHT_REGISTERED: The backlight device is registered. + */ BACKLIGHT_REGISTERED, + + /** + * @BACKLIGHT_UNREGISTERED: The backlight device is unregistered. + */ BACKLIGHT_UNREGISTERED, }; +/** + * enum backlight_scale - the type of scale used for brightness values + * + * The type of scale used for brightness values. + */ enum backlight_scale { + /** + * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown. + */ BACKLIGHT_SCALE_UNKNOWN = 0, + + /** + * @BACKLIGHT_SCALE_LINEAR: The scale is linear. + * + * The linear scale will increase brightness the same for each step. + */ BACKLIGHT_SCALE_LINEAR, + + /** + * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear.
+ * + * This is often used when the brightness values try to adjust to + * the relative perception of the eye, demanding a non-linear scale. + */ BACKLIGHT_SCALE_NON_LINEAR, }; struct backlight_device; struct fb_info; +/** + * struct backlight_ops - backlight operations + * + * The backlight operations are specified when the backlight device is registered. + */ struct backlight_ops { + /** + * @options: Configure how operations are called from the core. + * + * The options parameter is used to adjust the behaviour of the core. + * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called + * upon suspend and resume. + */ unsigned int options; #define BL_CORE_SUSPENDRESUME (1 << 0) - /* Notify the backlight driver some property has changed */ + /** + * @update_status: Operation called when properties have changed. + * + * Notify the backlight driver some property has changed. + * The update_status operation is protected by the update_lock. + * + * The backlight driver is expected to use backlight_is_blank() + * to check if the display is blanked and set brightness accordingly. + * update_status() is called when any of the properties has changed. + * + * RETURNS: + * + * 0 on success, negative error code if any failure occurred. + */ int (*update_status)(struct backlight_device *); - /* Return the current backlight brightness (accounting for power, - fb_blank etc.) */ + + /** + * @get_brightness: Return the current backlight brightness. + * + * The driver may implement this as a readback from the HW. + * This operation is optional and if not present then the current + * brightness property value is used. + * + * RETURNS: + * + * A brightness value which is 0 or a positive number. + * On failure a negative error code is returned. + */ int (*get_brightness)(struct backlight_device *); - /* Check if given framebuffer device is the one bound to this backlight; - return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ - int (*check_fb)(struct backlight_device *, struct fb_info *); + + /** + * @check_fb: Check the framebuffer device. + * + * Check if given framebuffer device is the one bound to this backlight. + * This operation is optional and if not implemented it is assumed that the + * fbdev is always the one bound to the backlight. + * + * RETURNS: + * + * If info is NULL or the info matches the fbdev bound to the backlight return true. + * If info does not match the fbdev bound to the backlight return false. + */ + int (*check_fb)(struct backlight_device *bd, struct fb_info *info); }; -/* This structure defines all the properties of a backlight */ +/** + * struct backlight_properties - backlight properties + * + * This structure defines all the properties of a backlight. + */ struct backlight_properties { - /* Current User requested brightness (0 - max_brightness) */ + /** + * @brightness: The current brightness requested by the user. + * + * The backlight core makes sure the range is (0 to max_brightness) + * when the brightness is set via the sysfs attribute: + * /sys/class/backlight/<backlight>/brightness. + * + * This value can be set in the backlight_properties passed + * to devm_backlight_device_register() to set a default brightness + * value. + */ int brightness; - /* Maximal value for brightness (read-only) */ + + /** + * @max_brightness: The maximum brightness value. + * + * This value must be set in the backlight_properties passed to + * devm_backlight_device_register() and shall not be modified by the + * driver after registration.
+ */ int max_brightness; - /* Current FB Power mode (0: full on, 1..3: power saving - modes; 4: full off), see FB_BLANK_XXX */ + + /** + * @power: The current power mode. + * + * User space can configure the power mode using the sysfs + * attribute: /sys/class/backlight/<backlight>/bl_power. + * When the power property is updated, update_status() is called. + * + * The possible values are: (0: full on, 1 to 3: power saving + * modes; 4: full off), see FB_BLANK_XXX. + * + * When the backlight device is enabled @power is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @power is set to FB_BLANK_POWERDOWN. + */ int power; - /* FB Blanking active? (values as for power) */ - /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ + + /** + * @fb_blank: The power state from the FBIOBLANK ioctl. + * + * When the FBIOBLANK ioctl is called @fb_blank is set to the + * blank parameter and the update_status() operation is called. + * + * When the backlight device is enabled @fb_blank is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @fb_blank is set to FB_BLANK_POWERDOWN. + * + * Backlight drivers should avoid using this property. It has been + * replaced by state & BL_CORE_FBBLANK (although most drivers should + * use backlight_is_blank() as the preferred means to get the blank + * state). + * + * fb_blank is deprecated and will be removed. + */ int fb_blank; - /* Backlight type */ + + /** + * @type: The type of backlight supported. + * + * The backlight type allows userspace to make appropriate + * policy decisions based on the backlight type. + * + * This value must be set in the backlight_properties + * passed to devm_backlight_device_register(). + */ enum backlight_type type; - /* Flags used to signal drivers of state changes */ + + /** + * @state: The state of the backlight core. + * + * The state is a bitmask. BL_CORE_FBBLANK is set when the display + * is expected to be blank. BL_CORE_SUSPENDED is set when the + * driver is suspended. + * + * Backlight drivers are expected to use backlight_is_blank() + * in their update_status() operation rather than reading the + * state property. + * + * The state is maintained by the core and drivers may not modify it. + */ unsigned int state; - /* Type of the brightness scale (linear, non-linear, ...) */ - enum backlight_scale scale; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ + /** + * @scale: The type of the brightness scale. + */ enum backlight_scale scale; }; +/** + * struct backlight_device - backlight device data + * + * This structure holds all data required by a backlight device. + */ struct backlight_device { - /* Backlight properties */ + /** + * @props: Backlight properties + */ struct backlight_properties props; - /* Serialise access to update_status method */ + /** + * @update_lock: The lock used when calling the update_status() operation. + * + * update_lock is an internal backlight lock that serialises access + * to the update_status() operation. The backlight core holds the update_lock + * when calling the update_status() operation. The update_lock shall not + * be used by backlight drivers. + */ struct mutex update_lock; - /* This protects the 'ops' field. If 'ops' is NULL, the driver that - registered this device has been unloaded, and if class_get_devdata() - points to something in the body of that driver, it is also invalid.
*/ + /** + * @ops_lock: The lock used around everything related to backlight_ops. + * + * ops_lock is an internal backlight lock that protects the ops pointer + * and is used around all accesses to ops and when the operations are + * invoked. The ops_lock shall not be used by backlight drivers. + */ struct mutex ops_lock; + + /** + * @ops: Pointer to the backlight operations. + * + * If ops is NULL, the driver that registered this device has been unloaded, + * and if class_get_devdata() points to something in the body of that driver, + * it is also invalid. + */ const struct backlight_ops *ops; - /* The framebuffer notifier block */ + /** + * @fb_notif: The framebuffer notifier block + */ struct notifier_block fb_notif; - /* list entry of all registered backlight devices */ + /** + * @entry: List entry of all registered backlight devices + */ struct list_head entry; + /** + * @dev: Parent device. + */ struct device dev; - /* Multiple framebuffers may share one backlight device */ + /** + * @fb_bl_on: The state of individual fbdevs. + * + * Multiple fbdevs may share one backlight device. The fb_bl_on + * records the state of the individual fbdev. + */ bool fb_bl_on[FB_MAX]; + /** + * @use_count: The number of uses of fb_bl_on. + */ int use_count; }; +/** + * backlight_update_status - force an update of the backlight device status + * @bd: the backlight device + */ static inline int backlight_update_status(struct backlight_device *bd) { int ret = -ENOENT; @@ -166,49 +389,83 @@ static inline int backlight_disable(struct backlight_device *bd) } /** - * backlight_put - Drop backlight reference - * @bd: the backlight device to put + * backlight_is_blank - Return true if display is expected to be blank + * @bd: the backlight device + * + * Display is expected to be blank if any of these is true:: + * + * 1) if power is not UNBLANK + * 2) if fb_blank is not UNBLANK + * 3) if state indicates BLANK or SUSPENDED + * + * Returns true if display is expected to be blank, false otherwise. */ -static inline void backlight_put(struct backlight_device *bd) +static inline bool backlight_is_blank(const struct backlight_device *bd) { - if (bd) - put_device(&bd->dev); + return bd->props.power != FB_BLANK_UNBLANK || + bd->props.fb_blank != FB_BLANK_UNBLANK || + bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } -extern struct backlight_device *backlight_device_register(const char *name, - struct device *dev, void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern struct backlight_device *devm_backlight_device_register( - struct device *dev, const char *name, struct device *parent, - void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern void backlight_device_unregister(struct backlight_device *bd); -extern void devm_backlight_device_unregister(struct device *dev, - struct backlight_device *bd); -extern void backlight_force_update(struct backlight_device *bd, - enum backlight_update_reason reason); -extern int backlight_register_notifier(struct notifier_block *nb); -extern int backlight_unregister_notifier(struct notifier_block *nb); -extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +/** + * backlight_get_brightness - Returns the current brightness value + * @bd: the backlight device + * + * Returns the current brightness value, taking into consideration the current + * state.
If backlight_is_blank() returns true then return 0 as the brightness, + * otherwise return the current brightness property value. + * + * Backlight drivers are expected to use this function in their update_status() + * operation to get the brightness value. + */ +static inline int backlight_get_brightness(const struct backlight_device *bd) +{ + if (backlight_is_blank(bd)) + return 0; + else + return bd->props.brightness; +} + +struct backlight_device * +backlight_device_register(const char *name, struct device *dev, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +struct backlight_device * +devm_backlight_device_register(struct device *dev, const char *name, + struct device *parent, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +void backlight_device_unregister(struct backlight_device *bd); +void devm_backlight_device_unregister(struct device *dev, + struct backlight_device *bd); +void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +int backlight_register_notifier(struct notifier_block *nb); +int backlight_unregister_notifier(struct notifier_block *nb); struct backlight_device *backlight_device_get_by_name(const char *name); -extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); +struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +int backlight_device_set_brightness(struct backlight_device *bd, + unsigned long brightness); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) +/** + * bl_get_data - access devdata + * @bl_dev: pointer to backlight device + * + * When a backlight device is registered the driver may supply a + * void * devdata. bl_get_data() returns a pointer to the + * devdata. + * + * RETURNS: + * + * pointer to devdata stored while registering the backlight device. + */ static inline void * bl_get_data(struct backlight_device *bl_dev) { return dev_get_drvdata(&bl_dev->dev); } -struct generic_bl_info { - const char *name; - int max_intensity; - int default_intensity; - int limit_mask; - void (*set_bl_intensity)(int intensity); - void (*kick_battery)(void); -}; - #ifdef CONFIG_OF struct backlight_device *of_find_backlight_by_node(struct device_node *node); #else @@ -220,14 +477,8 @@ of_find_backlight_by_node(struct device_node *node) #endif #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) -struct backlight_device *of_find_backlight(struct device *dev); struct backlight_device *devm_of_find_backlight(struct device *dev); #else -static inline struct backlight_device *of_find_backlight(struct device *dev) -{ - return NULL; -} - static inline struct backlight_device * devm_of_find_backlight(struct device *dev) { diff --git a/include/linux/btree.h b/include/linux/btree.h index 68f858c831b1..243ee544397a 100644 --- a/include/linux/btree.h +++ b/include/linux/btree.h @@ -10,7 +10,7 @@ * * A B+Tree is a data structure for looking up arbitrary (currently allowing * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure - * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not * use binary search to find the key on lookups.
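The reworked backlight.h kernel-doc above describes a complete driver-facing contract. As a purely illustrative sketch of how those pieces fit together - the "foo" driver, its single brightness register, and the platform glue are hypothetical, not code from this tree:

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_bl {                         /* hypothetical driver data */
        void __iomem *regs;
};

static int foo_update_status(struct backlight_device *bd)
{
        struct foo_bl *foo = bl_get_data(bd);

        /*
         * backlight_get_brightness() folds power, fb_blank and state into
         * one value: 0 when backlight_is_blank(), else props.brightness.
         */
        writel(backlight_get_brightness(bd), foo->regs);
        return 0;
}

static const struct backlight_ops foo_bl_ops = {
        .options        = BL_CORE_SUSPENDRESUME, /* update on suspend/resume */
        .update_status  = foo_update_status,
};

static int foo_bl_probe(struct platform_device *pdev)
{
        struct backlight_properties props = {
                .type           = BACKLIGHT_RAW,        /* must be set */
                .max_brightness = 255,                  /* must be set */
                .brightness     = 128,                  /* default level */
                .scale          = BACKLIGHT_SCALE_LINEAR,
        };
        struct foo_bl *foo;
        struct backlight_device *bd;

        foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;
        foo->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(foo->regs))
                return PTR_ERR(foo->regs);

        bd = devm_backlight_device_register(&pdev->dev, "foo-backlight",
                                            &pdev->dev, foo, &foo_bl_ops,
                                            &props);
        return PTR_ERR_OR_ZERO(bd);
}

A driver that can read the level back from its hardware would additionally implement get_brightness(); as the kernel-doc notes, the cached brightness property is used when that operation is absent.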
* * Each B+Tree consists of a head, that contains bookkeeping information and diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 39e6f4c57580..fcd84e8d88f4 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -58,7 +58,7 @@ * because 10.2.z (jewel) did not care if its peers advertised this * feature bit. * - * - In the second phase we stop advertising the the bit and call it + * - In the second phase we stop advertising the bit and call it * RETIRED. This can normally be done in the *next* major release * following the one in which we marked the feature DEPRECATED. In * the above example, for 12.0.z (luminous) we can say: diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index ebf5ba62b772..455e9b9e2adf 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -130,6 +130,7 @@ struct ceph_dir_layout { #define CEPH_MSG_CLIENT_REQUEST 24 #define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 #define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_METRICS 29 #define CEPH_MSG_CLIENT_CAPS 0x310 #define CEPH_MSG_CLIENT_LEASE 0x311 #define CEPH_MSG_CLIENT_SNAP 0x312 diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index e5ed1c541e7f..c8645f0b797d 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -282,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; extern struct kmem_cache *ceph_dir_file_cachep; extern struct kmem_cache *ceph_mds_request_cachep; +extern mempool_t *ceph_wb_pagevec_pool; /* ceph_common.c */ extern bool libceph_compatible(void *data); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index c60b59e9291b..83fa08a06507 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -404,7 +404,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); &__oreq->r_ops[__whch].typ.fld; \ }) -extern void osd_req_op_init(struct ceph_osd_request *osd_req, +struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index bd1ee9039558..03a5de5f99f4 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -189,7 +189,7 @@ struct clk_duty { * and >= numerator) Return 0 on success, otherwise -EERROR. * * @init: Perform platform-specific initialization magic. - * This is not not used by any of the basic clock types. + * This is not used by any of the basic clock types. * This callback exist for HW which needs to perform some * initialisation magic for CCF to get an accurate view of the * clock. 
It may also be used dynamic resource allocation is @@ -1096,7 +1096,6 @@ int clk_hw_get_parent_index(struct clk_hw *hw); int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); -unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); #define clk_hw_can_set_rate_parent(hw) \ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 49a53a137610..a4f82e836a7c 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -59,6 +59,7 @@ #define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */ #define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */ #define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */ +#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */ #define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */ #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ @@ -136,6 +137,8 @@ #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) +#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */ + #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ #define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ #define AT91_PMC_USBS_PLLA (0 << 0) @@ -174,6 +177,7 @@ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ #define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ +#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ #define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */ diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h deleted file mode 100644 index 4b0a69863656..000000000000 --- a/include/linux/clock_cooling.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/include/linux/clock_cooling.h - * - * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com> - * - * Copyright (C) 2013 Texas Instruments Inc. - * Contact: Eduardo Valentin <eduardo.valentin@ti.com> - * - * Highly based on cpufreq_cooling.c. - * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) - * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> - */ - -#ifndef __CPU_COOLING_H__ -#define __CPU_COOLING_H__ - -#include <linux/of.h> -#include <linux/thermal.h> -#include <linux/cpumask.h> - -#ifdef CONFIG_CLOCK_THERMAL -/** - * clock_cooling_register - function to create clock cooling device. - * @dev: struct device pointer to the device used as clock cooling device. - * @clock_name: string containing the clock used as cooling mechanism. - */ -struct thermal_cooling_device * -clock_cooling_register(struct device *dev, const char *clock_name); - -/** - * clock_cooling_unregister - function to remove clock cooling device. - * @cdev: thermal cooling device pointer. 
- */ -void clock_cooling_unregister(struct thermal_cooling_device *cdev); - -unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, - unsigned long freq); -#else /* !CONFIG_CLOCK_THERMAL */ -static inline struct thermal_cooling_device * -clock_cooling_register(struct device *dev, const char *clock_name) -{ - return NULL; -} -static inline -void clock_cooling_unregister(struct thermal_cooling_device *cdev) -{ -} -static inline -unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, - unsigned long freq) -{ - return THERMAL_CSTATE_INVALID; -} -#endif /* CONFIG_CLOCK_THERMAL */ - -#endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 6fa0eea3f530..25a521d299c1 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -85,11 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; +extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; +extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 8a072d00e688..cee0c728d39a 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -40,7 +40,7 @@ #endif /* - * Not all versions of clang implement the the type-generic versions + * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements * __has_builtin allowing us to avoid awkward version * checks. Unfortunately, we don't know which version of gcc clang diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 2e231ba8fe3f..4b33cb385f96 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -5,48 +5,54 @@ #ifndef __ASSEMBLY__ #ifdef __CHECKER__ +/* address spaces */ # define __kernel __attribute__((address_space(0))) # define __user __attribute__((noderef, address_space(__user))) -# define __safe __attribute__((safe)) -# define __force __attribute__((force)) -# define __nocast __attribute__((nocast)) # define __iomem __attribute__((noderef, address_space(__iomem))) +# define __percpu __attribute__((noderef, address_space(__percpu))) +# define __rcu __attribute__((noderef, address_space(__rcu))) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +/* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? 
({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(__percpu))) -# define __rcu __attribute__((noderef, address_space(__rcu))) +/* other */ +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __safe __attribute__((safe)) # define __private __attribute__((noderef)) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) #else /* __CHECKER__ */ +/* address spaces */ +# define __kernel # ifdef STRUCTLEAK_PLUGIN -# define __user __attribute__((user)) +# define __user __attribute__((user)) # else # define __user # endif -# define __kernel -# define __safe -# define __force -# define __nocast # define __iomem -# define __chk_user_ptr(x) (void)0 -# define __chk_io_ptr(x) (void)0 -# define __builtin_warning(x, y...) (1) +# define __percpu +# define __rcu +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +/* context/locking */ # define __must_hold(x) # define __acquires(x) # define __releases(x) -# define __acquire(x) (void)0 -# define __release(x) (void)0 +# define __acquire(x) (void)0 +# define __release(x) (void)0 # define __cond_lock(x,c) (c) -# define __percpu -# define __rcu +/* other */ +# define __force +# define __nocast +# define __safe # define __private # define ACCESS_PRIVATE(p, member) ((p)->member) +# define __builtin_warning(x, y...) (1) #endif /* __CHECKER__ */ /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ diff --git a/include/linux/console.h b/include/linux/console.h index 75dd20650fbe..0670d3491e0e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -24,17 +24,13 @@ struct module; struct tty_struct; struct notifier_block; -/* - * this is what the terminal answers to a ESC-Z or csi0c query. 
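The compiler_types.h hunk above regroups the sparse annotations by purpose (address spaces, context/locking, other). A short sketch of how they are typically used in driver code - foo_lock and the functions are made up for illustration:

#include <linux/spinlock.h>
#include <linux/uaccess.h>

static DEFINE_SPINLOCK(foo_lock);

/* __acquires()/__releases() let sparse track the lock context */
static void foo_enter(void) __acquires(foo_lock)
{
        spin_lock(&foo_lock);
}

static void foo_exit(void) __releases(foo_lock)
{
        spin_unlock(&foo_lock);
}

static long foo_read_flag(int __user *uptr)
{
        int val;

        /* __user pointers go through copy helpers, never plain loads */
        if (get_user(val, uptr))
                return -EFAULT;
        return val;
}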
- */ -#define VT100ID "\033[?1;2c" -#define VT102ID "\033[?6c" - enum con_scroll { SM_UP, SM_DOWN, }; +enum vc_intensity; + /** * struct consw - callbacks for consoles * @@ -74,8 +70,9 @@ struct consw { void (*con_scrolldelta)(struct vc_data *vc, int lines); int (*con_set_origin)(struct vc_data *vc); void (*con_save_screen)(struct vc_data *vc); - u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity, - u8 blink, u8 underline, u8 reverse, u8 italic); + u8 (*con_build_attr)(struct vc_data *vc, u8 color, + enum vc_intensity intensity, + bool blink, bool underline, bool reverse, bool italic); void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); u16 *(*con_screen_pos)(struct vc_data *vc, int offset); unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position, diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 24d4c16e3ae0..153734816b49 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -21,6 +21,43 @@ struct uni_pagedir; struct uni_screen; #define NPAR 16 +#define VC_TABSTOPS_COUNT 256U + +enum vc_intensity { + VCI_HALF_BRIGHT, + VCI_NORMAL, + VCI_BOLD, + VCI_MASK = 0x3, +}; + +/** + * struct vc_state -- state of a VC + * @x: cursor's x-position + * @y: cursor's y-position + * @color: foreground & background colors + * @Gx_charset: what's G0/G1 slot set to (like GRAF_MAP, LAT1_MAP) + * @charset: what character set to use (0=G0 or 1=G1) + * @intensity: see enum vc_intensity for values + * @reverse: reversed foreground/background colors + * + * These members are defined separately from struct vc_data as we save & + * restore them at times. + */ +struct vc_state { + unsigned int x, y; + + unsigned char color; + + unsigned char Gx_charset[2]; + unsigned int charset : 1; + + /* attribute flags */ + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; /* * Example: vc_data of a console that was scrolled 3 lines down. @@ -57,6 +94,8 @@ struct uni_screen; struct vc_data { struct tty_port port; /* Upper level data */ + struct vc_state state, saved_state; + unsigned short vc_num; /* Console number */ unsigned int vc_cols; /* [#] Console size */ unsigned int vc_rows; @@ -73,8 +112,6 @@ struct vc_data { /* attributes for all characters on screen */ unsigned char vc_attr; /* Current attributes */ unsigned char vc_def_color; /* Default colors */ - unsigned char vc_color; /* Foreground & background */ - unsigned char vc_s_color; /* Saved foreground & background */ unsigned char vc_ulcolor; /* Color for underline mode */ unsigned char vc_itcolor; unsigned char vc_halfcolor; /* Color for half intensity mode */ @@ -82,8 +119,6 @@ struct vc_data { unsigned int vc_cursor_type; unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */ unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */ - unsigned int vc_x, vc_y; /* Cursor position */ - unsigned int vc_saved_x, vc_saved_y; unsigned long vc_pos; /* Cursor address */ /* fonts */ unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ @@ -98,8 +133,6 @@ struct vc_data { int vt_newvt; wait_queue_head_t paste_wait; /* mode flags */ - unsigned int vc_charset : 1; /* Character set G0 / G1 */ - unsigned int vc_s_charset : 1; /* Saved character set */ unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */ unsigned int vc_toggle_meta : 1; /* Toggle high bit? 
*/ unsigned int vc_decscnm : 1; /* Screen Mode */ @@ -107,17 +140,6 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ - /* attribute flags */ - unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ - unsigned int vc_italic:1; - unsigned int vc_underline : 1; - unsigned int vc_blink : 1; - unsigned int vc_reverse : 1; - unsigned int vc_s_intensity : 2; /* saved rendition */ - unsigned int vc_s_italic:1; - unsigned int vc_s_underline : 1; - unsigned int vc_s_blink : 1; - unsigned int vc_s_reverse : 1; /* misc */ unsigned int vc_priv : 3; unsigned int vc_need_wrap : 1; @@ -126,13 +148,9 @@ struct vc_data { unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ unsigned char vc_utf_count; int vc_utf_char; - unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */ + DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. */ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ unsigned short * vc_translate; - unsigned char vc_G0_charset; - unsigned char vc_G1_charset; - unsigned char vc_saved_G0; - unsigned char vc_saved_G1; unsigned int vc_resize_user; /* resize request from user */ unsigned int vc_bell_pitch; /* Console bell pitch */ unsigned int vc_bell_duration; /* Console bell duration */ @@ -149,24 +167,29 @@ struct vc { struct work_struct SAK_work; /* might add scrmem, kbd at some time, - to have everything in one place - the disadvantage - would be that vc_cons etc can no longer be static */ + to have everything in one place */ }; extern struct vc vc_cons [MAX_NR_CONSOLES]; extern void vc_SAK(struct work_struct *work); -#define CUR_DEF 0 -#define CUR_NONE 1 -#define CUR_UNDERLINE 2 -#define CUR_LOWER_THIRD 3 -#define CUR_LOWER_HALF 4 -#define CUR_TWO_THIRDS 5 -#define CUR_BLOCK 6 -#define CUR_HWMASK 0x0f -#define CUR_SWMASK 0xfff0 - -#define CUR_DEFAULT CUR_UNDERLINE +#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \ + ((set) << 16)) +#define CUR_SIZE(c) ((c) & 0x00000f) +# define CUR_DEF 0 +# define CUR_NONE 1 +# define CUR_UNDERLINE 2 +# define CUR_LOWER_THIRD 3 +# define CUR_LOWER_HALF 4 +# define CUR_TWO_THIRDS 5 +# define CUR_BLOCK 6 +#define CUR_SW 0x000010 +#define CUR_ALWAYS_BG 0x000020 +#define CUR_INVERT_FG_BG 0x000040 +#define CUR_FG 0x000700 +#define CUR_BG 0x007000 +#define CUR_CHANGE(c) ((c) & 0x00ff00) +#define CUR_SET(c) (((c) & 0xff0000) >> 8) bool con_is_visible(const struct vc_data *vc); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index e62b022cb07e..58687a5bf9c8 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -127,7 +127,7 @@ struct cpufreq_policy { /* Cached frequency lookup from cpufreq_driver_resolve_freq. 
*/ unsigned int cached_target_freq; - int cached_resolved_idx; + unsigned int cached_resolved_idx; /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 191772d4a4d7..a2710e654b64 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -181,6 +181,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 525510a9f965..6594dbc34a37 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -38,6 +38,8 @@ phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) +#define VMCOREINFO_BUILD_ID(value) \ + vmcoreinfo_append_str("BUILD-ID=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -64,6 +66,10 @@ extern unsigned char *vmcoreinfo_data; extern size_t vmcoreinfo_size; extern u32 *vmcoreinfo_note; +/* raw contents of kernel .notes section */ +extern const void __start_notes __weak; +extern const void __stop_notes __weak; + Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 33c16f2de7f6..2f811baf78d2 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -17,7 +17,7 @@ * The algorithm was originally described in detail in this paper * (although the algorithm has evolved somewhat since then): * - * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf * * LGPL2 */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index a81f0c3cf352..65d975bf9390 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat; struct dentry { /* RCU lookup touched fields */ unsigned int d_flags; /* protected by d_lock */ - seqcount_t d_seq; /* per dentry seqlock */ + seqcount_spinlock_t d_seq; /* per dentry seqlock */ struct hlist_bl_node d_hash; /* lookup hash list */ struct dentry *d_parent; /* parent directory */ struct qstr d_name; diff --git a/include/linux/delay.h b/include/linux/delay.h index 5e016a4029d9..1d0e2ce6b6d9 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -16,7 +16,7 @@ * 3. CPU clock rate changes. * * Please see this thread: - * http://lists.openwall.net/linux-kernel/2011/01/09/56 + * https://lists.openwall.net/linux-kernel/2011/01/09/56 */ #include <linux/kernel.h> diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 79a6e37a1d6f..9df2dfca68dd 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -1,17 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * devfreq_cooling: Thermal cooling device implementation for devices using * devfreq * * Copyright (C) 2014-2015 ARM Limited * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
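Several hunks in this diff (dcache.h above, dma-resv.h and fs_struct.h nearby) convert plain seqcount_t fields to lock-associated seqcount types such as seqcount_spinlock_t, which lets lockdep verify that a writer really holds the associated lock. A minimal sketch of the pattern, with a hypothetical struct foo:

#include <linux/seqlock.h>
#include <linux/spinlock.h>

struct foo {
        spinlock_t lock;
        seqcount_spinlock_t seq;        /* seqcount tied to @lock */
        int a, b;
};

static void foo_init(struct foo *f)
{
        spin_lock_init(&f->lock);
        seqcount_spinlock_init(&f->seq, &f->lock);
}

static void foo_write(struct foo *f)
{
        spin_lock(&f->lock);
        write_seqcount_begin(&f->seq);  /* lockdep checks f->lock is held */
        f->a++;
        f->b++;
        write_seqcount_end(&f->seq);
        spin_unlock(&f->lock);
}

static void foo_read(struct foo *f, int *a, int *b)
{
        unsigned int seq;

        do {                            /* retry if a writer interleaved */
                seq = read_seqcount_begin(&f->seq);
                *a = f->a;
                *b = f->b;
        } while (read_seqcount_retry(&f->seq, seq));
}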
- * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __DEVFREQ_COOLING_H__ diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index ee50d10f052b..d44a77e8a7e3 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -46,8 +46,6 @@ #include <linux/rcupdate.h> extern struct ww_class reservation_ww_class; -extern struct lock_class_key reservation_seqcount_class; -extern const char reservation_seqcount_string[]; /** * struct dma_resv_list - a list of shared fences @@ -71,7 +69,7 @@ struct dma_resv_list { */ struct dma_resv { struct ww_mutex lock; - seqcount_t seq; + seqcount_ww_mutex_t seq; struct dma_fence __rcu *fence_excl; struct dma_resv_list __rcu *fence; diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h index 61d5cc0ad601..1962f75fa2d3 100644 --- a/include/linux/dma/k3-psil.h +++ b/include/linux/dma/k3-psil.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_PSIL_H_ diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h index caadbab1632a..5eb34ad973a7 100644 --- a/include/linux/dma/k3-udma-glue.h +++ b/include/linux/dma/k3-udma-glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_UDMA_GLUE_H_ diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h index 579356ae447e..5896441ee604 100644 --- a/include/linux/dma/ti-cppi5.h +++ b/include/linux/dma/ti-cppi5.h @@ -2,7 +2,7 @@ /* * CPPI5 descriptors interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __TI_CPPI5_H__ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index abd8cffdf202..6fbd5c99e30c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -39,6 +39,7 @@ enum dma_status { DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, + DMA_OUT_OF_ORDER, }; /** @@ -61,6 +62,7 @@ enum dma_transaction_type { DMA_SLAVE, DMA_CYCLIC, DMA_INTERLEAVE, + DMA_COMPLETION_NO_ORDER, DMA_REPEAT, DMA_LOAD_EOT, /* last transaction type for creation of the capabilities mask */ @@ -164,7 +166,7 @@ struct dma_interleaved_template { * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of * this transaction * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client - * acknowledges receipt, i.e. has has a chance to establish any dependency + * acknowledges receipt, i.e. 
has a chance to establish any dependency * chains * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P @@ -479,7 +481,11 @@ enum dma_residue_granularity { * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT(<TYPE>) and same * should be checked by controller as well + * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer + * @max_sg_burst: max number of SG list entries executed in a single burst + * DMA transaction with no software intervention for reinitialization. + * Zero value means unlimited number of entries. * @cmd_pause: true, if pause is supported (i.e. for reading residue or * for resume later) * @cmd_resume: true, if resume is supported @@ -492,7 +498,9 @@ struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 min_burst; u32 max_burst; + u32 max_sg_burst; bool cmd_pause; bool cmd_resume; bool cmd_terminate; @@ -783,7 +791,11 @@ struct dma_filter { * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT(<TYPE>) and same * should be checked by controller as well + * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer + * @max_sg_burst: max number of SG list entries executed in a single burst + * DMA transaction with no software intervention for reinitialization. + * Zero value means unlimited number of entries. * @residue_granularity: granularity of the transfer residue reported * by tx_status * @device_alloc_chan_resources: allocate resources and return the @@ -803,6 +815,8 @@ struct dma_filter { * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: Transfer expression in a generic way. * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address + * @device_caps: May be used to override the generic DMA slave capabilities + * with per-channel specific ones * @device_config: Pushes a new configuration to a channel, return 0 or an error * code * @device_pause: Pauses any transfer happening on a channel.
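The new dma_slave_caps fields added above (min_burst, max_sg_burst) reach DMA clients through the existing dma_get_slave_caps() helper, and drivers can tailor them per channel via the new device_caps() callback. A hypothetical client-side check, with made-up limits:

#include <linux/dmaengine.h>

static int foo_check_dma_caps(struct dma_chan *chan)
{
        struct dma_slave_caps caps;
        int ret;

        ret = dma_get_slave_caps(chan, &caps);
        if (ret)
                return ret;

        /* max_sg_burst == 0 means an unlimited number of SG entries */
        if (caps.max_sg_burst && caps.max_sg_burst < 4)
                return -EINVAL;
        /* respect the controller's minimum burst, if it advertises one */
        if (caps.min_burst > 4)
                return -EINVAL;
        return 0;
}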
Returns @@ -853,7 +867,9 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 min_burst; u32 max_burst; + u32 max_sg_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; @@ -901,6 +917,8 @@ struct dma_device { struct dma_chan *chan, dma_addr_t dst, u64 data, unsigned long flags); + void (*device_caps)(struct dma_chan *chan, + struct dma_slave_caps *caps); int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); int (*device_pause)(struct dma_chan *chan); diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7bf029df737..65565820328a 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -48,6 +48,7 @@ struct dmar_drhd_unit { u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; + u8 gfx_dedicated:1; /* graphic dedicated */ struct intel_iommu *iommu; }; diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 99fc06f0afc1..407c2f281b64 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -38,6 +38,8 @@ #ifdef __KERNEL__ +#include <asm/bug.h> + struct dql { /* Fields accessed in enqueue path (dql_queued) */ unsigned int num_queued; /* Total ever queued */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 05c47f857383..73db1ae04cef 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -606,7 +606,11 @@ extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif #ifdef CONFIG_X86 extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index 7a37f4ce9fd2..10485f0c9740 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h @@ -32,10 +32,6 @@ struct compat_elf_prstatus struct old_timeval32 pr_cutime; struct old_timeval32 pr_cstime; compat_elf_gregset_t pr_reg; -#ifdef CONFIG_BINFMT_ELF_FDPIC - compat_ulong_t pr_exec_fdpic_loadmap; - compat_ulong_t pr_interp_fdpic_loadmap; -#endif compat_int_t pr_fpvalid; }; diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 4cad0e784b28..46c3d691f677 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -5,12 +5,65 @@ #include <linux/user.h> #include <linux/bug.h> #include <linux/sched/task_stack.h> - -#include <asm/elf.h> -#include <uapi/linux/elfcore.h> +#include <linux/types.h> +#include <linux/signal.h> +#include <linux/time.h> +#include <linux/ptrace.h> +#include <linux/fs.h> +#include <linux/elf.h> struct coredump_params; +struct elf_siginfo +{ + int si_signo; /* signal number */ + int si_code; /* extra code */ + int si_errno; /* errno */ +}; + +/* + * Definitions to generate Intel SVR4-like core files. + * These mostly have the same names as the SVR4 types with "elf_" + * tacked on the front to prevent clashes with linux definitions, + * and the typedef forms have been avoided. This is mostly like + * the SVR4 structure, but more Linuxy, with things that Linux does + * not support and which gdb doesn't really use excluded. 
+ */ +struct elf_prstatus +{ + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned long pr_sigpend; /* Set of pending signals */ + unsigned long pr_sighold; /* Set of held signals */ + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; /* User time */ + struct __kernel_old_timeval pr_stime; /* System time */ + struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ + struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ + elf_gregset_t pr_reg; /* GP registers */ + int pr_fpvalid; /* True if math co-processor being used. */ +}; + +#define ELF_PRARGSZ (80) /* Number of chars for args */ + +struct elf_prpsinfo +{ + char pr_state; /* numeric process state */ + char pr_sname; /* char for pr_state */ + char pr_zomb; /* zombie */ + char pr_nice; /* nice val */ + unsigned long pr_flag; /* flags */ + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + /* Lots missing */ + char pr_fname[16]; /* filename of executable */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ +}; + static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) { #ifdef ELF_CORE_COPY_REGS @@ -51,13 +104,6 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } -#ifdef ELF_CORE_COPY_XFPREGS -static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) -{ - return ELF_CORE_COPY_XFPREGS(t, xfpu); -} -#endif - /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. Dumping its diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index d896b8657085..3ceb72b67a7a 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -178,7 +178,7 @@ struct fid { * get_name: * @get_name should find a name for the given @child in the given @parent * directory. The name should be stored in the @name (with the - * understanding that it is already pointing to a a %NAME_MAX+1 sized + * understanding that it is already pointing to a %NAME_MAX+1 sized * buffer. get_name() should return %0 on success, a negative error code * or error. @get_name will be called without @parent->i_mutex held. 
* diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index b79fa9bb7359..3e9c56ee651f 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -18,8 +18,10 @@ #define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ FAN_CLASS_PRE_CONTENT) -#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \ - FAN_REPORT_TID | FAN_REPORT_FID | \ +#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) + +#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \ + FAN_REPORT_TID | \ FAN_CLOEXEC | FAN_NONBLOCK | \ FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS) diff --git a/include/linux/fb.h b/include/linux/fb.h index 2b530e6d86e4..850f79e9a7cb 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -400,8 +400,6 @@ struct fb_tile_ops { #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ -#define FBINFO_MISC_USEREVENT 0x10000 /* event request - from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ /* A driver may set this flag to indicate that it does want a set_par to be diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index 6d775984905b..b07d88c92bb2 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -10,7 +10,7 @@ /* * Return code to denote that requested number of * frontswap pages are unused(moved to page cache). - * Used in in shmem_unuse and try_to_unuse. + * Used in shmem_unuse and try_to_unuse. */ #define FRONTSWAP_PAGES_UNUSED 2 diff --git a/include/linux/fs.h b/include/linux/fs.h index 488c3ef93601..7c69dd7c6160 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -518,6 +518,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping) up_read(&mapping->i_mmap_rwsem); } +static inline void i_mmap_assert_locked(struct address_space *mapping) +{ + lockdep_assert_held(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_assert_write_locked(struct address_space *mapping) +{ + lockdep_assert_held_write(&mapping->i_mmap_rwsem); +} + /* * Might pages of this file be mapped into userspace? */ @@ -528,7 +538,7 @@ static inline int mapping_mapped(struct address_space *mapping) /* * Might pages of this file have been modified in userspace? - * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff + * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. vma may be mprotected writable even if now readonly. 
* @@ -1712,6 +1722,10 @@ int vfs_mkobj(struct dentry *, umode_t, int (*f)(struct dentry *, umode_t, void *), void *); +int vfs_fchown(struct file *file, uid_t user, gid_t group); +int vfs_fchmod(struct file *file, umode_t mode); +int vfs_utimes(const struct path *path, struct timespec64 *times); + extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT @@ -1942,27 +1956,27 @@ struct super_operations { /* * Inode flags - they have no relation to superblock flags now */ -#define S_SYNC 1 /* Writes are synced at once */ -#define S_NOATIME 2 /* Do not update access times */ -#define S_APPEND 4 /* Append-only file */ -#define S_IMMUTABLE 8 /* Immutable file */ -#define S_DEAD 16 /* removed, but still open directory */ -#define S_NOQUOTA 32 /* Inode is not counted to quota */ -#define S_DIRSYNC 64 /* Directory modifications are synchronous */ -#define S_NOCMTIME 128 /* Do not update file c/mtime */ -#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ -#define S_PRIVATE 512 /* Inode is fs-internal */ -#define S_IMA 1024 /* Inode has an associated IMA struct */ -#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ -#define S_NOSEC 4096 /* no suid or xattr security attributes */ +#define S_SYNC (1 << 0) /* Writes are synced at once */ +#define S_NOATIME (1 << 1) /* Do not update access times */ +#define S_APPEND (1 << 2) /* Append-only file */ +#define S_IMMUTABLE (1 << 3) /* Immutable file */ +#define S_DEAD (1 << 4) /* removed, but still open directory */ +#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */ +#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */ +#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */ +#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */ +#define S_PRIVATE (1 << 9) /* Inode is fs-internal */ +#define S_IMA (1 << 10) /* Inode has an associated IMA struct */ +#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */ +#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */ #ifdef CONFIG_FS_DAX -#define S_DAX 8192 /* Direct Access, avoiding the page cache */ +#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */ #else -#define S_DAX 0 /* Make all the DAX code disappear */ +#define S_DAX 0 /* Make all the DAX code disappear */ #endif -#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */ -#define S_CASEFOLD 32768 /* Casefolded file */ -#define S_VERITY 65536 /* Verity file (using fs/verity/) */ +#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */ +#define S_CASEFOLD (1 << 15) /* Casefolded file */ +#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */ /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -2646,7 +2660,7 @@ static inline void filemap_set_wb_err(struct address_space *mapping, int err) } /** - * filemap_check_wb_error - has an error occurred since the mark was sampled? + * filemap_check_wb_err - has an error occurred since the mark was sampled? * @mapping: mapping to check for writeback errors * @since: previously-sampled errseq_t * @@ -2946,6 +2960,21 @@ extern void discard_new_inode(struct inode *); extern unsigned int get_next_ino(void); extern void evict_inodes(struct super_block *sb); +/* + * Userspace may rely on the inode number being non-zero. For example, glibc + * simply ignores files with zero i_ino in unlink() and other places.
+ * + * As an additional complication, if userspace was compiled with + * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the + * lower 32 bits, so we need to check that those aren't zero explicitly. With + * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but + * better safe than sorry. + */ +static inline bool is_zero_ino(ino_t ino) +{ + return (u32)ino == 0; +} + extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index cf1015abfbf2..783b48dedb72 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -9,7 +9,7 @@ struct fs_struct { int users; spinlock_t lock; - seqcount_t seq; + seqcount_spinlock_t seq; int umask; int in_exec; struct path root, pwd; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 5ab28f6c7d26..f8acddcf54fb 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -23,19 +23,14 @@ * have changed (i.e. renamed over). * * Unlike fsnotify_parent(), the event will be reported regardless of the - * FS_EVENT_ON_CHILD mask on the parent inode. + * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only + * the child is interested and not the parent. */ static inline void fsnotify_name(struct inode *dir, __u32 mask, struct inode *child, const struct qstr *name, u32 cookie) { - fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); - /* - * Send another flavor of the event without child inode data and - * without the specific event type (e.g. FS_CREATE|FS_IS_DIR). - * The name is relative to the dir inode the event is reported to. - */ - fsnotify(dir, FS_DIR_MODIFY, dir, FSNOTIFY_EVENT_INODE, name, 0); + fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); } static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, @@ -44,38 +39,55 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); } -/* - * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when - * an event is on a file/dentry. - */ -static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) +static inline void fsnotify_inode(struct inode *inode, __u32 mask) +{ + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0); +} + +/* Notify this dentry's parent about a child's events. */ +static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, + const void *data, int data_type) { struct inode *inode = d_inode(dentry); - if (S_ISDIR(inode->i_mode)) + if (S_ISDIR(inode->i_mode)) { mask |= FS_ISDIR; - fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE); - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + /* sb/mount marks are not interested in name of directory */ + if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) + goto notify_child; + } + + /* disconnected dentry cannot notify parent */ + if (IS_ROOT(dentry)) + goto notify_child; + + return __fsnotify_parent(dentry, mask, data, data_type); + +notify_child: + return fsnotify(mask, data, data_type, NULL, NULL, inode, 0); +} + +/* + * Simple wrappers to consolidate calls to fsnotify_parent() when an event + * is on a file/dentry. 
+ */ +static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) +{ + fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE); } static inline int fsnotify_file(struct file *file, __u32 mask) { const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - int ret; if (file->f_mode & FMODE_NONOTIFY) return 0; - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - ret = fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); - if (ret) - return ret; - - return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } /* Simple call site for access decisions */ @@ -108,12 +120,7 @@ static inline int fsnotify_perm(struct file *file, int mask) */ static inline void fsnotify_link_count(struct inode *inode) { - __u32 mask = FS_ATTRIB; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_ATTRIB); } /* @@ -128,7 +135,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, u32 fs_cookie = fsnotify_get_cookie(); __u32 old_dir_mask = FS_MOVED_FROM; __u32 new_dir_mask = FS_MOVED_TO; - __u32 mask = FS_MOVE_SELF; const struct qstr *new_name = &moved->d_name; if (old_dir == new_dir) @@ -137,7 +143,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, if (isdir) { old_dir_mask |= FS_ISDIR; new_dir_mask |= FS_ISDIR; - mask |= FS_ISDIR; } fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); @@ -145,9 +150,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, if (target) fsnotify_link_count(target); - - if (source) - fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(source, FS_MOVE_SELF); audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); } @@ -172,12 +175,7 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) */ static inline void fsnotify_inoderemove(struct inode *inode) { - __u32 mask = FS_DELETE_SELF; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_DELETE_SELF); __fsnotify_inode_delete(inode); } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index f0c506405b54..f8529a3a2923 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -47,11 +47,13 @@ #define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permission hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ -#define FS_DIR_MODIFY 0x00080000 /* Directory entry was modified */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ -/* This inode cares about things that happen to its children. Always set for - * dnotify and inotify. */ +/* + * Set on inode mark that cares about things that happen to its children. + * Always set for dnotify and inotify. + * Set on inode/sb/mount marks that care about parent/name info. + */ #define FS_EVENT_ON_CHILD 0x08000000 #define FS_DN_RENAME 0x10000000 /* file renamed */ @@ -67,21 +69,28 @@ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event * when a directory entry inside a child subdir changes.
*/ -#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | \ - FS_DIR_MODIFY) +#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) /* - * This is a list of all events that may get sent to a parent based on fs event - * happening to inodes inside that directory. + * This is a list of all events that may get sent to a parent that is watching + * with flag FS_EVENT_ON_CHILD based on an fs event on a child of that directory. */ #define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ FS_OPEN | FS_OPEN_EXEC) +/* + * This is a list of all events that may get sent with the parent inode as the + * @to_tell argument of fsnotify(). + * It may include events that can be sent to an inode/sb/mount mark, but cannot + * be sent to a parent watching children. + */ +#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD) + /* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ FS_EVENTS_POSS_ON_CHILD | \ @@ -108,18 +117,41 @@ struct mem_cgroup; * these operations for each relevant group. * * handle_event - main call for a group to handle an fs event + * @group: group to notify + * @mask: event type and flags + * @data: object that event happened on + * @data_type: type of object for fanotify_data_XXX() accessors + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to + * @file_name: optional file name associated with event + * @cookie: inotify rename cookie + * @iter_info: array of marks from this group that are interested in the event + * + * handle_inode_event - simple variant of handle_event() for groups that only + * have inode marks and don't have an ignore mask + * @mark: mark to notify + * @mask: event type and flags + * @inode: inode that event happened on + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to. + * @file_name: optional file name associated with event + * * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. The group - * MUST be holding a reference on each mark and that reference must be - * dropped in this function. inotify uses this function to send - * userspace messages that marks have been removed. + * MUST be holding a reference on each mark and that reference must be + * dropped in this function. inotify uses this function to send + * userspace messages that marks have been removed.
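 *
 * Editorial sketch (hypothetical backend, not part of the patch), matching
 * the handle_inode_event() prototype declared in fsnotify_ops below:
 *
 *	static int my_handle_inode_event(struct fsnotify_mark *mark, u32 mask,
 *					 struct inode *inode, struct inode *dir,
 *					 const struct qstr *file_name)
 *	{
 *		pr_debug("mask=0x%x ino=%lu\n", mask, inode->i_ino);
 *		return 0;
 *	}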
*/ struct fsnotify_ops { - int (*handle_event)(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, + int (*handle_event)(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); + int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *file_name); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); void (*free_event)(struct fsnotify_event *event); @@ -220,12 +252,11 @@ enum fsnotify_data_type { FSNOTIFY_EVENT_INODE, }; -static inline const struct inode *fsnotify_data_inode(const void *data, - int data_type) +static inline struct inode *fsnotify_data_inode(const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_INODE: - return data; + return (struct inode *)data; case FSNOTIFY_EVENT_PATH: return d_inode(((const struct path *)data)->dentry); default: @@ -246,6 +277,7 @@ static inline const struct path *fsnotify_data_path(const void *data, enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, + FSNOTIFY_OBJ_TYPE_CHILD, FSNOTIFY_OBJ_TYPE_VFSMOUNT, FSNOTIFY_OBJ_TYPE_SB, FSNOTIFY_OBJ_TYPE_COUNT, @@ -253,6 +285,7 @@ enum fsnotify_obj_type { }; #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) +#define FSNOTIFY_OBJ_TYPE_CHILD_FL (1U << FSNOTIFY_OBJ_TYPE_CHILD) #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) #define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) @@ -297,6 +330,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ } FSNOTIFY_ITER_FUNCS(inode, INODE) +FSNOTIFY_ITER_FUNCS(child, CHILD) FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) FSNOTIFY_ITER_FUNCS(sb, SB) @@ -377,15 +411,29 @@ struct fsnotify_mark { /* called from the vfs helpers */ /* main fsnotify call to send events */ -extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, - int data_type, const struct qstr *name, u32 cookie); -extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, +extern int fsnotify(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *name, + struct inode *inode, u32 cookie); +extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); extern u32 fsnotify_get_cookie(void); +static inline __u32 fsnotify_parent_needed_mask(__u32 mask) +{ + /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */ + if (!(mask & FS_EVENT_ON_CHILD)) + return 0; + /* + * This object might be watched by a mark that cares about parent/name + * info, does it care about the specific set of events that can be + * reported with parent/name info? 
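 *
 * Editorial examples, derived from the mask definitions above:
 * fsnotify_parent_needed_mask(FS_MODIFY) == 0, because FS_EVENT_ON_CHILD is
 * not set in the mask, while
 * fsnotify_parent_needed_mask(FS_MODIFY | FS_EVENT_ON_CHILD) == FS_MODIFY,
 * because FS_MODIFY is part of FS_EVENTS_POSS_TO_PARENT.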
+ */ + return mask & FS_EVENTS_POSS_TO_PARENT; +} + static inline int fsnotify_inode_watches_children(struct inode *inode) { /* FS_EVENT_ON_CHILD is set if the inode may care */ @@ -535,13 +583,14 @@ static inline void fsnotify_init_event(struct fsnotify_event *event, #else -static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, - int data_type, const struct qstr *name, u32 cookie) +static inline int fsnotify(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *name, + struct inode *inode, u32 cookie) { return 0; } -static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, +static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { return 0; diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index 02393c0c98f9..bfd00320c7f3 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -44,7 +44,7 @@ struct genradix_root; struct __genradix { - struct genradix_root __rcu *root; + struct genradix_root *root; }; /* diff --git a/include/linux/highmem.h b/include/linux/highmem.h index d6e82e3de027..14e6202ce47f 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -73,7 +73,7 @@ static inline void kunmap(struct page *page) * no global lock is needed and because the kmap code must perform a global TLB * invalidation when the kmap pool wraps. * - * However when holding an atomic kmap is is not legal to sleep, so atomic + * However when holding an atomic kmap it is not legal to sleep, so atomic * kmaps are appropriate for short, tight code paths only. * * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 15c8ac313678..107cedd7019a 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/percpu.h> +#include <linux/seqlock.h> #include <linux/timer.h> #include <linux/timerqueue.h> @@ -159,7 +160,7 @@ struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; unsigned int index; clockid_t clockid; - seqcount_t seq; + seqcount_raw_spinlock_t seq; struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(void); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 71f20776b06c..467302056e17 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, unsigned long old_end, + unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, @@ -181,13 +181,6 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) -#ifdef CONFIG_DEBUG_VM -#define transparent_hugepage_debug_cow() \ - (transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG)) -#else /* CONFIG_DEBUG_VM */ -#define transparent_hugepage_debug_cow() 0 -#endif /* CONFIG_DEBUG_VM */ extern unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, diff --git a/include/linux/hugetlb.h 
b/include/linux/hugetlb.h index 50650d0d01b9..d5cc5f802dd4 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -10,6 +10,7 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/pgtable.h> +#include <linux/gfp.h> struct ctl_table; struct user_struct; @@ -164,7 +165,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, @@ -203,8 +205,9 @@ static inline struct address_space *hugetlb_page_mapping_lock_write( return NULL; } -static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, - pte_t *ptep) +static inline int huge_pmd_unshare(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { return 0; } @@ -504,13 +507,10 @@ struct huge_bootmem_page { struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); -struct page *alloc_huge_page_node(struct hstate *h, int nid); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask); + nodemask_t *nmask, gfp_t gfp_mask); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); -struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, - int nid, nodemask_t *nmask); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); @@ -692,6 +692,27 @@ static inline bool hugepage_movable_supported(struct hstate *h) return true; } +/* Movability of hugepages depends on migration support. 
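 *
 * Editorial example for the two helpers below: with a migratable hstate,
 * htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NORETRY) evaluates to
 * GFP_HIGHUSER_MOVABLE | __GFP_THISNODE; only __GFP_THISNODE and
 * __GFP_NOWARN are carried over from the caller's mask.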
*/ +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + if (hugepage_movable_supported(h)) + return GFP_HIGHUSER_MOVABLE; + else + return GFP_HIGHUSER; +} + +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + gfp_t modified_mask = htlb_alloc_mask(h); + + /* Some callers might want to enforce node */ + modified_mask |= (gfp_mask & __GFP_THISNODE); + + modified_mask |= (gfp_mask & __GFP_NOWARN); + + return modified_mask; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -759,13 +780,9 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma, return NULL; } -static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) -{ - return NULL; -} - static inline struct page * -alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) +alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, + nodemask_t *nmask, gfp_t gfp_mask) { return NULL; } @@ -878,6 +895,16 @@ static inline bool hugepage_movable_supported(struct hstate *h) return false; } +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + return 0; +} + +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + return 0; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 4e7714c88f95..fc55ea41d323 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -231,7 +231,6 @@ enum i2c_alert_protocol { * @detect: Callback for device detection * @address_list: The I2C addresses to probe (for detect) * @clients: List of detected clients we created (for i2c-core use only) - * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver. @@ -290,8 +289,6 @@ struct i2c_driver { int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; struct list_head clients; - - bool disable_i2c_core_irq_mapping; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) @@ -609,6 +606,14 @@ struct i2c_timings { * may configure padmux here for SDA/SCL line or something else they want. * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. + * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins. + * Optional. + * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned + * to the I2C bus. Optional. Populated internally for GPIO recovery, if + * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid. + * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as + * GPIOs. Optional. Populated internally for GPIO recovery, if this state + * is called "gpio" or "recovery" and pinctrl is valid. 
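 *
 * Editorial sketch of a device tree fragment providing both states
 * (hypothetical pin group labels, not part of the patch):
 *
 *	pinctrl-names = "default", "gpio";
 *	pinctrl-0 = <&i2c0_pins>;
 *	pinctrl-1 = <&i2c0_gpio_pins>;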
*/ struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter *adap); @@ -625,6 +630,9 @@ struct i2c_bus_recovery_info { /* gpio recovery */ struct gpio_desc *scl_gpiod; struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; }; int i2c_recover_bus(struct i2c_adapter *adap); diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 7bc961defa87..caa8bb279a34 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -42,6 +42,10 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * @resp: motion sensor response structure * @type: type of motion sensor * @loc: location where the motion sensor is placed + * @range_updated: True if the range of the sensor has been + * updated. + * @curr_range: If updated, the current range value. + * It will be reapplied at every resume. * @calib: calibration parameters. Note that trigger * captured data will always provide the calibrated * data @@ -65,6 +69,9 @@ struct cros_ec_sensors_core_state { enum motionsensor_type type; enum motionsensor_location loc; + bool range_updated; + int curr_range; + struct calib_data { s16 offset; u16 scale; @@ -114,7 +121,9 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask); -/* List of extended channel specification for all sensors */ +extern const struct dev_pm_ops cros_ec_sensors_pm_ops; + +/* List of extended channel specification for all sensors. */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; extern const struct attribute *cros_ec_sensor_fifo_attributes[]; diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h new file mode 100644 index 000000000000..f2e94196d31f --- /dev/null +++ b/include/linux/iio/iio-opaque.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _INDUSTRIAL_IO_OPAQUE_H_ +#define _INDUSTRIAL_IO_OPAQUE_H_ + +/** + * struct iio_dev_opaque - industrial I/O device opaque information + * @indio_dev: public industrial I/O device information + * @event_interface: event chrdevs associated with interrupt lines + * @buffer_list: list of all buffers currently attached + * @channel_attr_list: keep track of automatically created channel + * attributes + * @chan_attr_group: group for all attrs in base directory + * @debugfs_dentry: device specific debugfs dentry + * @cached_reg_addr: cached register address for debugfs reads + * @read_buf: read buffer to be used for the initial reg read + * @read_buf_len: data length in @read_buf + */ +struct iio_dev_opaque { + struct iio_dev indio_dev; + struct iio_event_interface *event_interface; + struct list_head buffer_list; + struct list_head channel_attr_list; + struct attribute_group chan_attr_group; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_dentry; + unsigned cached_reg_addr; + char read_buf[20]; + unsigned int read_buf_len; +#endif +}; + +#define to_iio_dev_opaque(indio_dev) \ + container_of(indio_dev, struct iio_dev_opaque, indio_dev) + +#endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index a1be82e74c93..e2df67a3b9ab 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -488,9 +488,7 @@ struct iio_buffer_setup_ops { * @currentmode: [DRIVER] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent * and owner - * 
@event_interface: [INTERN] event chrdevs associated with interrupt lines * @buffer: [DRIVER] any buffer present - * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux * @mlock: [INTERN] lock used to prevent simultaneous device state * changes @@ -506,9 +504,6 @@ struct iio_buffer_setup_ops { * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table * @num_channels: [DRIVER] number of channels specified in @channels. - * @channel_attr_list: [INTERN] keep track of automatically created channel - * attributes - * @chan_attr_group: [INTERN] group for all attrs in base directory * @name: [DRIVER] name of the device. * @label: [DRIVER] unique name to identify which device this is * @info: [DRIVER] callbacks and constant info from driver @@ -520,8 +515,8 @@ struct iio_buffer_setup_ops { * @groups: [INTERN] attribute groups * @groupcounter: [INTERN] index of next attribute group * @flags: [INTERN] file ops related flags including busy flag. - * @debugfs_dentry: [INTERN] device specific debugfs dentry. - * @cached_reg_addr: [INTERN] cached register address for debugfs reads. + * @priv: [DRIVER] reference to driver's private information + * **MUST** be accessed **ONLY** via iio_priv() helper */ struct iio_dev { int id; @@ -531,10 +526,7 @@ struct iio_dev { int currentmode; struct device dev; - struct iio_event_interface *event_interface; - struct iio_buffer *buffer; - struct list_head buffer_list; int scan_bytes; struct mutex mlock; @@ -551,8 +543,6 @@ struct iio_dev { struct iio_chan_spec const *channels; int num_channels; - struct list_head channel_attr_list; - struct attribute_group chan_attr_group; const char *name; const char *label; const struct iio_info *info; @@ -565,12 +555,7 @@ struct iio_dev { int groupcounter; unsigned long flags; -#if defined(CONFIG_DEBUG_FS) - struct dentry *debugfs_dentry; - unsigned cached_reg_addr; - char read_buf[20]; - unsigned int read_buf_len; -#endif + void *priv; }; const struct iio_chan_spec @@ -649,6 +634,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev) return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL; } +/** + * iio_device_set_parent() - assign parent device to the IIO device object + * @indio_dev: IIO device structure + * @parent: reference to parent device object + * + * This utility must be called between IIO device allocation + * (via devm_iio_device_alloc()) & IIO device registration + * (via {devm_}iio_device_register()). + * By default, the device allocation will also assign a parent device to + * the IIO device object. In cases where devm_iio_device_alloc() is used, + * sometimes the parent device must be different than the device used to + * manage the allocation. + * In that case, this helper should be used to change the parent, hence the + * requirement to call this between allocation & registration. 
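 *
 * A minimal usage sketch (editorial; @phys_dev stands for a hypothetical
 * device distinct from the managing device @dev):
 *
 *	indio_dev = devm_iio_device_alloc(dev, sizeof(struct my_state));
 *	if (!indio_dev)
 *		return -ENOMEM;
 *	iio_device_set_parent(indio_dev, phys_dev);
 *	return devm_iio_device_register(dev, indio_dev);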
+ **/ +static inline void iio_device_set_parent(struct iio_dev *indio_dev, + struct device *parent) +{ + indio_dev->dev.parent = parent; +} /** * iio_device_set_drvdata() - Set device driver data @@ -669,28 +674,23 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data) * * Returns the data previously set with iio_device_set_drvdata() */ -static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev) +static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) { return dev_get_drvdata(&indio_dev->dev); } /* Can we make this smaller? */ #define IIO_ALIGN L1_CACHE_BYTES -struct iio_dev *iio_device_alloc(int sizeof_priv); +struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); +/* The information at the returned address is guaranteed to be cacheline aligned */ static inline void *iio_priv(const struct iio_dev *indio_dev) { - return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN); -} - -static inline struct iio_dev *iio_priv_to_dev(void *priv) -{ - return (struct iio_dev *)((char *)priv - - ALIGN(sizeof(struct iio_dev), IIO_ALIGN)); + return indio_dev->priv; } void iio_device_free(struct iio_dev *indio_dev); -struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); +struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...); /** @@ -709,10 +709,7 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) * @indio_dev: IIO device structure for device **/ #if defined(CONFIG_DEBUG_FS) -static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) -{ - return indio_dev->debugfs_dentry; -} +struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev); #else static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) { diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h index c3c6ba5ec423..3aa2f132dd67 100644 --- a/include/linux/iio/trigger_consumer.h +++ b/include/linux/iio/trigger_consumer.h @@ -50,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p); void iio_trigger_notify_done(struct iio_trigger *trig); -/* - * Two functions for common case where all that happens is a pollfunc - * is attached and detached from a trigger - */ -int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); -int iio_triggered_buffer_predisable(struct iio_dev *indio_dev); - #endif diff --git a/include/linux/ima.h b/include/linux/ima.h index 9164e1534ec9..d15100de6cdd 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -25,7 +25,7 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); extern void ima_post_path_mknod(struct dentry *dentry); extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); -extern void ima_kexec_cmdline(const void *buf, int size); +extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); #ifdef CONFIG_IMA_KEXEC extern void ima_add_kexec_buffer(struct kimage *image); @@ -103,7 +103,7 @@ static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size) return -EOPNOTSUPP; } -static inline void ima_kexec_cmdline(const void *buf, int size) {} +static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} #endif /* CONFIG_IMA */ #ifndef CONFIG_IMA_KEXEC diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h new file mode 100644 index 
000000000000..92045d18cbfc --- /dev/null +++ b/include/linux/init_syscalls.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +int __init init_mount(const char *dev_name, const char *dir_name, + const char *type_page, unsigned long flags, void *data_page); +int __init init_umount(const char *name, int flags); +int __init init_chdir(const char *filename); +int __init init_chroot(const char *filename); +int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); +int __init init_chmod(const char *filename, umode_t mode); +int __init init_eaccess(const char *filename); +int __init init_stat(const char *filename, struct kstat *stat, int flags); +int __init init_mknod(const char *filename, umode_t mode, unsigned int dev); +int __init init_link(const char *oldname, const char *newname); +int __init init_symlink(const char *oldname, const char *newname); +int __init init_unlink(const char *pathname); +int __init init_mkdir(const char *pathname, umode_t mode); +int __init init_rmdir(const char *pathname); +int __init init_utimes(char *filename, struct timespec64 *ts); +int __init init_dup(struct file *file); diff --git a/include/linux/initrd.h b/include/linux/initrd.h index aa5914355728..8db6f8c8030b 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -2,12 +2,6 @@ #define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ -/* 1 = load ramdisk, 0 = don't load */ -extern int rd_doload; - -/* 1 = prompt for ramdisk, 0 = don't prompt */ -extern int rd_prompt; - /* starting block # of image */ extern int rd_image_start; diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 3e8fa1c7a1e6..b1ed2f25f7c0 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -381,8 +381,7 @@ enum { #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ @@ -600,6 +599,8 @@ struct intel_iommu { struct iommu_device iommu; /* IOMMU core code handle */ int node; u32 flags; /* Software defined flags */ + + struct dmar_drhd_unit *drhd; }; /* PCI domain-device relationship */ @@ -705,7 +706,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, u32 pasid, u16 qdep, u64 addr, - unsigned int size_order, u64 granu); + unsigned int size_order); void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, int pasid); @@ -728,6 +729,7 @@ void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); struct dmar_domain *find_domain(struct device *dev); struct device_domain_info *get_domain_info(struct device *dev); +struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); #ifdef CONFIG_INTEL_IOMMU_SVM extern void intel_svm_check(struct intel_iommu *iommu); @@ -740,6 +742,9 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata); void intel_svm_unbind(struct iommu_sva *handle); int intel_svm_get_pasid(struct iommu_sva *handle); +int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, + struct 
iommu_page_response *msg); + struct svm_dev_ops; struct intel_svm_dev { @@ -766,8 +771,6 @@ struct intel_svm { struct list_head devs; struct list_head list; }; - -extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); #else static inline void intel_svm_check(struct intel_iommu *iommu) {} #endif diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 53d53c6c2be9..23285ba645db 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -155,7 +155,7 @@ struct io_pgtable_cfg { */ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); + phys_addr_t paddr, size_t size, int prot, gfp_t gfp); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5f0b7859d2eb..fee209efb756 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -31,12 +31,6 @@ * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) -/* - * Non-coherent masters can use this page protection flag to set cacheable - * memory attributes for only a transparent outer level of cache, also known as - * the last-level or system cache. - */ -#define IOMMU_SYS_CACHE_ONLY (1 << 6) struct iommu_ops; struct iommu_group; @@ -457,22 +451,6 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); -/** - * iommu_map_sgtable - Map the given buffer to the IOMMU domain - * @domain: The IOMMU domain to perform the mapping - * @iova: The start address to map the buffer - * @sgt: The sg_table object describing the buffer - * @prot: IOMMU protection bits - * - * Creates a mapping at @iova for the buffer described by a scatterlist - * stored in the given sg_table object in the provided IOMMU domain. - */ -static inline size_t iommu_map_sgtable(struct iommu_domain *domain, - unsigned long iova, struct sg_table *sgt, int prot) -{ - return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); -} - extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern void generic_iommu_put_resv_regions(struct device *dev, @@ -1079,6 +1057,22 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) } #endif /* CONFIG_IOMMU_API */ +/** + * iommu_map_sgtable - Map the given buffer to the IOMMU domain + * @domain: The IOMMU domain to perform the mapping + * @iova: The start address to map the buffer + * @sgt: The sg_table object describing the buffer + * @prot: IOMMU protection bits + * + * Creates a mapping at @iova for the buffer described by a scatterlist + * stored in the given sg_table object in the provided IOMMU domain. 
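 *
 * Editorial usage sketch (assumes a live @domain, a caller-chosen @iova and
 * a populated @sgt); iommu_map_sg() returns the number of bytes mapped and
 * 0 on failure, so a zero result means nothing was mapped:
 *
 *	mapped = iommu_map_sgtable(domain, iova, sgt,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	if (!mapped)
 *		return -ENOMEM;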
+ */ +static inline size_t iommu_map_sgtable(struct iommu_domain *domain, + unsigned long iova, struct sg_table *sgt, int prot) +{ + return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); +} + #ifdef CONFIG_IOMMU_DEBUGFS extern struct dentry *iommu_debugfs_dir; void iommu_debugfs_setup(void); diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h index 216e5adf80ce..dca379c0d7eb 100644 --- a/include/linux/irqchip/irq-omap-intc.h +++ b/include/linux/irqchip/irq-omap-intc.h @@ -2,7 +2,7 @@ /** * irq-omap-intc.h - INTC Idle Functions * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * * Author: Felipe Balbi <balbi@ti.com> */ diff --git a/include/linux/jhash.h b/include/linux/jhash.h index ba2f6a9776b6..19ddd43aee68 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -5,7 +5,7 @@ * * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net) * - * http://burtleburtle.net/bob/hash/ + * https://burtleburtle.net/bob/hash/ * * These are the credits from Bob's sources: * diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 82522e996c76..087fba34b209 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -38,7 +38,6 @@ extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); -void kasan_unpoison_stack_above_sp_to(const void *watermark); void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); @@ -101,7 +100,6 @@ void kasan_restore_multi_shot(bool enabled); static inline void kasan_unpoison_shadow(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} -static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {} static inline void kasan_enable_current(void) {} static inline void kasan_disable_current(void) {} @@ -174,11 +172,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); +void kasan_record_aux_stack(void *ptr); #else /* CONFIG_KASAN_GENERIC */ static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} +static inline void kasan_record_aux_stack(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 82d91547d122..500def620d8f 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -17,7 +17,6 @@ #include <asm/byteorder.h> #include <asm/div64.h> #include <uapi/linux/kernel.h> -#include <asm/div64.h> #define STACK_MAGIC 0xdeadbeef @@ -34,6 +33,7 @@ #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) /* generic data direction definitions */ @@ -321,8 +321,7 @@ void panic(const char *fmt, ...) 
__noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); extern void oops_enter(void); extern void oops_exit(void); -void print_oops_end_marker(void); -extern int oops_may_print(void); +extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; @@ -346,7 +345,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Preferred over simple_strtoul(). Return code must be checked. */ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) { @@ -374,7 +373,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Preferred over simple_strtol(). Return code must be checked. */ static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) { diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ea67910ae6b7..9e93bef52968 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -183,17 +183,24 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void * __weak arch_kexec_kernel_image_load(struct kimage *image); -int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); -int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); +/* Architectures may override the below functions */ +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len); +void *arch_kexec_kernel_image_load(struct kimage *image); +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kexec_apply_relocations(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#ifdef CONFIG_KEXEC_SIG +int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len); +#endif +int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 45b8cdc9fad7..9be1bff4f586 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -227,7 +227,6 @@ extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); -extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); diff --git a/include/linux/ktime.h 
b/include/linux/ktime.h index 42d2e6ac35f2..a12b5523cc18 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -23,6 +23,7 @@ #include <linux/time.h> #include <linux/jiffies.h> +#include <asm/bug.h> /* Nanosecond scalar representation for kernel time values */ typedef s64 ktime_t; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ac83e9c1d82c..a23076765b4c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -211,8 +211,8 @@ struct kvm_async_pf { void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - unsigned long hva, struct kvm_arch_async_pf *arch); +bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + unsigned long hva, struct kvm_arch_async_pf *arch); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif @@ -774,6 +774,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); @@ -816,6 +817,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); +#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE +int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); +int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); +void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); +void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); +#endif + bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp); diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h index dc1da020305b..dac047abdba7 100644 --- a/include/linux/kvm_irqfd.h +++ b/include/linux/kvm_irqfd.h @@ -42,7 +42,7 @@ struct kvm_kernel_irqfd { wait_queue_entry_t wait; /* Update side is protected by irqfds.lock */ struct kvm_kernel_irq_routing_entry irq_entry; - seqcount_t irq_entry_sc; + seqcount_spinlock_t irq_entry_sc; /* Used for level IRQ fast-path */ int gsi; struct work_struct inject; diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 68e84cf42a3f..a7580f69dda0 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -20,6 +20,8 @@ enum kvm_mr_change; #include <linux/types.h> +#include <asm/kvm_types.h> + /* * Address types: * @@ -58,4 +60,21 @@ struct gfn_to_pfn_cache { bool dirty; }; +#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE +/* + * Memory caches are used to preallocate memory ahead of various MMU flows, + * e.g. page fault handlers. Gracefully handling allocation failures deep in + * MMU flows is problematic, as is triggering reclaim, I/O, etc... while + * holding MMU locks. Note, these caches act more like prefetch buffers than + * classical caches, i.e. objects are not returned to the cache on being freed. 
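 *
 * Editorial sketch of the intended calling pattern (names as declared in
 * kvm_host.h above; the locking is illustrative): top up the cache in a
 * sleepable context, then allocate from it while holding MMU locks, where
 * failure is not an option:
 *
 *	r = kvm_mmu_topup_memory_cache(mc, min);
 *	if (r)
 *		return r;
 *	spin_lock(&kvm->mmu_lock);
 *	obj = kvm_mmu_memory_cache_alloc(mc);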
+ */ +struct kvm_mmu_memory_cache { + int nobjs; + gfp_t gfp_zero; + struct kmem_cache *kmem_cache; + void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE]; +}; +#endif + + #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h index 5eb111f38803..420b61e5a213 100644 --- a/include/linux/leds-ti-lmu-common.h +++ b/include/linux/leds-ti-lmu-common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ // TI LMU Common Core -// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/ #ifndef _TI_LMU_COMMON_H_ #define _TI_LMU_COMMON_H_ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 18da4059be09..01f251b6e36c 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -76,8 +76,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct device_node; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; - unsigned long bus_dsm_mask; unsigned long cmd_mask; + unsigned long dimm_family_mask; + unsigned long bus_family_mask; struct module *module; char *provider_name; struct device_node *of_node; @@ -85,6 +86,7 @@ struct nvdimm_bus_descriptor { int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *data); + const struct nvdimm_bus_fw_ops *fw_ops; }; struct nd_cmd_desc { @@ -199,6 +201,49 @@ struct nvdimm_security_ops { int (*query_overwrite)(struct nvdimm *nvdimm); }; +enum nvdimm_fwa_state { + NVDIMM_FWA_INVALID, + NVDIMM_FWA_IDLE, + NVDIMM_FWA_ARMED, + NVDIMM_FWA_BUSY, + NVDIMM_FWA_ARM_OVERFLOW, +}; + +enum nvdimm_fwa_trigger { + NVDIMM_FWA_ARM, + NVDIMM_FWA_DISARM, +}; + +enum nvdimm_fwa_capability { + NVDIMM_FWA_CAP_INVALID, + NVDIMM_FWA_CAP_NONE, + NVDIMM_FWA_CAP_QUIESCE, + NVDIMM_FWA_CAP_LIVE, +}; + +enum nvdimm_fwa_result { + NVDIMM_FWA_RESULT_INVALID, + NVDIMM_FWA_RESULT_NONE, + NVDIMM_FWA_RESULT_SUCCESS, + NVDIMM_FWA_RESULT_NOTSTAGED, + NVDIMM_FWA_RESULT_NEEDRESET, + NVDIMM_FWA_RESULT_FAIL, +}; + +struct nvdimm_bus_fw_ops { + enum nvdimm_fwa_state (*activate_state) + (struct nvdimm_bus_descriptor *nd_desc); + enum nvdimm_fwa_capability (*capability) + (struct nvdimm_bus_descriptor *nd_desc); + int (*activate)(struct nvdimm_bus_descriptor *nd_desc); +}; + +struct nvdimm_fw_ops { + enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm); + enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm); + int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg); +}; + void badrange_init(struct badrange *badrange); int badrange_add(struct badrange *badrange, u64 addr, u64 length); void badrange_forget(struct badrange *badrange, phys_addr_t start, @@ -224,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq, const char *dimm_id, - const struct nvdimm_security_ops *sec_ops); + const struct nvdimm_security_ops *sec_ops, + const struct nvdimm_fw_ops *fw_ops); static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq) { return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, - cmd_mask, num_flush, flush_wpq, NULL, NULL); 
+ cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL); } const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 39a35699d0d6..62a382d1845b 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -11,6 +11,7 @@ #define __LINUX_LOCKDEP_H #include <linux/lockdep_types.h> +#include <linux/smp.h> #include <asm/percpu.h> struct task_struct; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index af998f93d256..2a8c74d99015 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -15,7 +15,7 @@ */ /* - * The macro LSM_HOOK is used to define the data structures required by the + * The macro LSM_HOOK is used to define the data structures required by * the LSM framework using the pattern: * * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 95b7c1d32062..9e2e3e63719d 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -815,7 +815,7 @@ * structure. Note that the security field was not added directly to the * socket structure, but rather, the socket security information is stored * in the associated inode. Typically, the inode alloc_security hook will - * allocate and and attach security information to + * allocate and attach security information to * SOCK_INODE(sock)->i_security. This hook may be used to update the * SOCK_INODE(sock)->i_security field with additional information that * wasn't available when the inode was allocated. diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index a96e8c252bac..05eea1aef5aa 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -90,4 +90,6 @@ struct cmdq_pkt { void *cl; }; +u8 cmdq_get_shift_pa(struct mbox_chan *chan); + #endif /* __MTK_CMDQ_MAILBOX_H__ */ diff --git a/include/linux/math64.h b/include/linux/math64.h index d097119419e6..3381d9e33c4e 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -281,4 +281,23 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); #define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \ ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); }) +/* + * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer + * @dividend: signed 64bit dividend + * @divisor: signed 32bit divisor + * + * Divide signed 64bit dividend by signed 32bit divisor + * and round to closest integer. + * + * Return: dividend / divisor rounded to nearest integer + */ +#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \ +{ \ + s64 __x = (dividend); \ + s32 __d = (divisor); \ + ((__x > 0) == (__d > 0)) ? \ + div_s64((__x + (__d / 2)), __d) : \ + div_s64((__x - (__d / 2)), __d); \ +} \ +) #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e77197a62809..385237e4cb44 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -23,6 +23,7 @@ #include <linux/page-flags.h> struct mem_cgroup; +struct obj_cgroup; struct page; struct mm_struct; struct kmem_cache; @@ -31,8 +32,7 @@ struct kmem_cache; enum memcg_stat_item { MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, MEMCG_SOCK, - /* XXX: why are these zone and not node counters? 
*/ - MEMCG_KERNEL_STACK_KB, + MEMCG_PERCPU_B, MEMCG_NR_STAT, }; @@ -48,12 +48,6 @@ enum memcg_memory_event { MEMCG_NR_MEMORY_EVENTS, }; -enum mem_cgroup_protection { - MEMCG_PROT_NONE, - MEMCG_PROT_LOW, - MEMCG_PROT_MIN, -}; - struct mem_cgroup_reclaim_cookie { pg_data_t *pgdat; unsigned int generation; @@ -71,8 +65,8 @@ struct mem_cgroup_id { /* * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremated by the number of pages. This counter is used for - * for trigger some periodic events. This is straightforward and better + * it will be incremented by the number of pages. This counter is used + * to trigger some periodic events. This is straightforward and better * than using jiffies etc. to handle periodic memcg event. */ enum mem_cgroup_events_target { @@ -193,6 +187,22 @@ struct memcg_cgwb_frn { }; /* + * Bucket for arbitrarily byte-sized objects charged to a memory + * cgroup. The bucket can be reparented in one piece when the cgroup + * is destroyed, without having to round up the individual references + * of all live memory objects in the wild. + */ +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct rcu_head rcu; + }; +}; + +/* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide * statistics based on the statistics developed by Rik Van Riel for clock-pro, @@ -300,7 +310,8 @@ struct mem_cgroup { /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; - struct list_head kmem_caches; + struct obj_cgroup __rcu *objcg; + struct list_head objcg_list; /* list of inherited objcgs */ #endif #ifdef CONFIG_CGROUP_WRITEBACK @@ -329,6 +340,13 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; +static __always_inline bool memcg_stat_item_in_bytes(int idx) +{ + if (idx == MEMCG_PERCPU_B) + return true; + return vmstat_item_in_bytes(idx); +} + static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); @@ -339,12 +357,49 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, bool in_low_reclaim) { if (mem_cgroup_disabled()) return 0; + /* + * There is no reclaim protection applied to a targeted reclaim. + * We are special casing this specific case here because + * mem_cgroup_protected calculation is not robust enough to keep + * the protection invariant for calculated effective values for + * parallel reclaimers with different reclaim target. This is + * especially a problem for tail memcgs (as they have pages on LRU) + * which would want to have effective values 0 for targeted reclaim + * but a different value for external reclaim. 
+ * + * Example + * Let's have global and A's reclaim in parallel: + * | + * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) + * |\ + * | C (low = 1G, usage = 2.5G) + * B (low = 1G, usage = 0.5G) + * + * For the global reclaim + * A.elow = A.low + * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow + * C.elow = min(C.usage, C.low) + * + * With the effective values resetting we have A reclaim + * A.elow = 0 + * B.elow = B.low + * C.elow = C.low + * + * If the global reclaim races with A's reclaim then + * B.elow = C.elow = 0 because children_low_usage > A.elow + * is possible and reclaiming B would be violating the protection. + * + */ + if (root == memcg) + return 0; + if (in_low_reclaim) return READ_ONCE(memcg->memory.emin); @@ -352,8 +407,36 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, READ_ONCE(memcg->memory.elow)); } -enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, - struct mem_cgroup *memcg); +void mem_cgroup_calculate_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg); + +static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg) +{ + /* + * The root memcg doesn't account charges, and doesn't support + * protection. + */ + return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg); +} + +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) +{ + if (!mem_cgroup_supports_protection(memcg)) + return false; + + return READ_ONCE(memcg->memory.elow) >= + page_counter_read(&memcg->memory); +} + +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) +{ + if (!mem_cgroup_supports_protection(memcg)) + return false; + + return READ_ONCE(memcg->memory.emin) >= + page_counter_read(&memcg->memory); +} int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); @@ -416,6 +499,33 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? container_of(css, struct mem_cgroup, css) : NULL; } +static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) +{ + return percpu_ref_tryget(&objcg->refcnt); +} + +static inline void obj_cgroup_get(struct obj_cgroup *objcg) +{ + percpu_ref_get(&objcg->refcnt); +} + +static inline void obj_cgroup_put(struct obj_cgroup *objcg) +{ + percpu_ref_put(&objcg->refcnt); +} + +/* + * After the initialization objcg->memcg is always pointing at + * a valid memcg, but can be atomically swapped to the parent memcg. + * + * The caller must ensure that the returned memcg won't be released: + * e.g. acquire the rcu_read_lock or css_set_lock.
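 *
 * A minimal caller sketch (editorial, not part of the patch); no reference
 * is taken, so the returned memcg may only be used inside the RCU section:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... use memcg ...
 *	rcu_read_unlock();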
+ */ +static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) +{ + return READ_ONCE(objcg->memcg); +} + static inline void mem_cgroup_put(struct mem_cgroup *memcg) { if (memcg) @@ -679,11 +789,34 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return x; } +void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val); void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); + void mod_memcg_obj_state(void *p, int idx, int val); +static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_lruvec_slab_state(p, idx, val); + local_irq_restore(flags); +} + +static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_memcg_lruvec_state(lruvec, idx, val); + local_irq_restore(flags); +} + static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -825,16 +958,26 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, bool in_low_reclaim) { return 0; } -static inline enum mem_cgroup_protection mem_cgroup_protected( - struct mem_cgroup *root, struct mem_cgroup *memcg) +static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg) +{ +} + +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) +{ + return false; +} + +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) { - return MEMCG_PROT_NONE; + return false; } static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, @@ -1057,6 +1200,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return node_page_state(lruvec_pgdat(lruvec), idx); } +static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ +} + static inline void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -1089,6 +1237,14 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, __mod_node_page_state(page_pgdat(page), idx, val); } +static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + struct page *page = virt_to_head_page(p); + + mod_node_page_state(page_pgdat(page), idx, val); +} + static inline void mod_memcg_obj_state(void *p, int idx, int val) { } @@ -1341,9 +1497,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, } #endif -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); -void memcg_kmem_put_cache(struct kmem_cache *cachep); - #ifdef CONFIG_MEMCG_KMEM int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, unsigned int nr_pages); @@ -1351,8 +1504,12 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages); int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); +struct obj_cgroup *get_obj_cgroup_from_current(void); + +int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); +void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); + extern struct static_key_false 
memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
@@ -1368,7 +1525,19 @@ void memcg_put_cache_ids(void);
 
 static inline bool memcg_kmem_enabled(void)
 {
-	return static_branch_unlikely(&memcg_kmem_enabled_key);
+	return static_branch_likely(&memcg_kmem_enabled_key);
+}
+
+static inline bool memcg_kmem_bypass(void)
+{
+	if (in_interrupt())
+		return true;
+
+	/* Allow remote memcg charging in kthread contexts. */
+	if ((!current->mm || (current->flags & PF_KTHREAD)) &&
+	     !current->active_memcg)
+		return true;
+	return false;
 }
 
 static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
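memcg_kmem_enabled() is now a static_branch_likely() because kmem accounting is on by default, and memcg_kmem_bypass() filters out contexts that must not be charged. A rough sketch of a charge path built from the helpers declared above (a hypothetical function, loosely modelled on the slab charging code, not part of the patch):

	/* hypothetical: charge 'size' bytes of a kernel object to the current cgroup */
	static int my_charge_object(size_t size)
	{
		struct obj_cgroup *objcg;
		int ret;

		if (!memcg_kmem_enabled() || memcg_kmem_bypass())
			return 0;	/* interrupts and bare kthreads are not charged */

		objcg = get_obj_cgroup_from_current();
		if (!objcg)
			return 0;

		ret = obj_cgroup_charge(objcg, GFP_KERNEL, size);
		obj_cgroup_put(objcg);	/* a real caller keeps the objcg to uncharge on free */
		return ret;
	}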
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index ea9c15b60a96..5f1c74df264d 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -6,7 +6,7 @@
 #ifndef _LINUX_MEMPOLICY_H
 #define _LINUX_MEMPOLICY_H 1
 
-
+#include <linux/sched.h>
 #include <linux/mmzone.h>
 #include <linux/dax.h>
 #include <linux/slab.h>
@@ -28,7 +28,7 @@ struct mm_struct;
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
- * Locking policy for interlave:
+ * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
@@ -152,6 +152,15 @@ extern int huge_node(struct vm_area_struct *vma,
 extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
 				const nodemask_t *mask);
+extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+	struct mempolicy *mpol = get_task_policy(current);
+
+	return policy_nodemask(gfp, mpol);
+}
+
 extern unsigned int mempolicy_slab_node(void);
 
 extern enum zone_type policy_zone;
@@ -281,5 +290,10 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
 static inline void mpol_put_task_policy(struct task_struct *task)
 {
 }
+
+static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
+{
+	return NULL;
+}
 #endif /* CONFIG_NUMA */
 #endif
diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h
index d469aa481243..b08570ff34df 100644
--- a/include/linux/mfd/sky81452.h
+++ b/include/linux/mfd/sky81452.h
@@ -9,11 +9,9 @@
 #ifndef _SKY81452_H
 #define _SKY81452_H
 
-#include <linux/platform_data/sky81452-backlight.h>
 #include <linux/regulator/machine.h>
 
 struct sky81452_platform_data {
-	struct sky81452_bl_platform_data *bl_pdata;
 	struct regulator_init_data *regulator_init_data;
 };
 
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 540998d9810b..0f8d1583fa8e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -10,6 +10,8 @@
 typedef struct page *new_page_t(struct page *page, unsigned long private);
 typedef void free_page_t(struct page *page, unsigned long private);
 
+struct migration_target_control;
+
 /*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
@@ -31,34 +33,6 @@ enum migrate_reason {
 /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern const char *migrate_reason_names[MR_TYPES];
 
-static inline struct page *new_page_nodemask(struct page *page,
-		int preferred_nid, nodemask_t *nodemask)
-{
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
-	unsigned int order = 0;
-	struct page *new_page = NULL;
-
-	if (PageHuge(page))
-		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
-				preferred_nid, nodemask);
-
-	if (PageTransHuge(page)) {
-		gfp_mask |= GFP_TRANSHUGE;
-		order = HPAGE_PMD_ORDER;
-	}
-
-	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
-		gfp_mask |= __GFP_HIGHMEM;
-
-	new_page = __alloc_pages_nodemask(gfp_mask, order,
-				preferred_nid, nodemask);
-
-	if (new_page && PageTransHuge(new_page))
-		prep_transhuge_page(new_page);
-
-	return new_page;
-}
-
 #ifdef CONFIG_MIGRATION
 
 extern void putback_movable_pages(struct list_head *l);
@@ -67,6 +41,7 @@ extern int migrate_page(struct address_space *mapping,
 		enum migrate_mode mode);
 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 		unsigned long private, enum migrate_mode mode, int reason);
+extern struct page *alloc_migration_target(struct page *page, unsigned long private);
 extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 extern void putback_movable_page(struct page *page);
 
@@ -85,6 +60,9 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 		free_page_t free, unsigned long private, enum migrate_mode mode,
 		int reason)
 	{ return -ENOSYS; }
+static inline struct page *alloc_migration_target(struct page *page,
+		unsigned long private)
+	{ return NULL; }
 static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
 	{ return -EBUSY; }
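alloc_migration_target() replaces the open-coded new_page_nodemask() helper as the common new_page_t callback; the forward-declared struct migration_target_control (defined in mm/internal.h, opaque here) travels through the private argument. A sketch of a caller, assuming the control structure carries the nid and gfp_mask members the mm code defines for it:

	/* sketch: migrate an isolated list of pages toward a preferred node */
	struct migration_target_control mtc = {
		.nid = preferred_nid,
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	if (migrate_pages(&pagelist, alloc_migration_target, NULL,
			  (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG))
		putback_movable_pages(&pagelist);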
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 57db125e5802..4d3376e20f5e 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -276,7 +276,9 @@ enum {
 	MLX5_MKEY_MASK_RW		= 1ull << 20,
 	MLX5_MKEY_MASK_A		= 1ull << 21,
 	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
-	MLX5_MKEY_MASK_FREE		= 1ull << 29,
+	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
+	MLX5_MKEY_MASK_FREE			= 1ull << 29,
+	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
 };
 
 enum {
@@ -1007,7 +1009,6 @@ enum {
 	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
 	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
 	MLX5_MKEY_BSF_EN	= 1 << 30,
-	MLX5_MKEY_LEN64		= 1 << 31,
 };
 
 struct mlx5_mkey_seg {
@@ -1361,11 +1362,11 @@ enum mlx5_qcam_feature_groups {
 	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
 
 #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
-	MLX5_GET(device_virtio_emulation_cap, \
+	MLX5_GET(virtio_emulation_cap, \
 		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
 
 #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
-	MLX5_GET64(device_virtio_emulation_cap, \
+	MLX5_GET64(virtio_emulation_cap, \
 		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
 
 #define MLX5_CAP_IPSEC(mdev, cap)\
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index a0fcc4d13e93..c145de0473bc 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -975,6 +975,7 @@ void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
 
 void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
 		    unsigned int *irqn);
@@ -1057,6 +1058,7 @@ enum {
 enum {
 	MLX5_INTERFACE_PROTOCOL_IB  = 0,
 	MLX5_INTERFACE_PROTOCOL_ETH = 1,
+	MLX5_INTERFACE_PROTOCOL_VDPA = 2,
 };
 
 struct mlx5_interface {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 35e4dc66b326..de1ffb4804d6 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -93,6 +93,7 @@ enum {
 enum {
 	MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+	MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d,
 	MLX5_OBJ_TYPE_MKEY = 0xff01,
 	MLX5_OBJ_TYPE_QP = 0xff02,
 	MLX5_OBJ_TYPE_PSV = 0xff03,
@@ -985,17 +986,40 @@ struct mlx5_ifc_device_event_cap_bits {
 	u8         user_unaffiliated_events[4][0x40];
 };
 
-struct mlx5_ifc_device_virtio_emulation_cap_bits {
-	u8         reserved_at_0[0x20];
+struct mlx5_ifc_virtio_emulation_cap_bits {
+	u8         desc_tunnel_offload_type[0x1];
+	u8         eth_frame_offload_type[0x1];
+	u8         virtio_version_1_0[0x1];
+	u8         device_features_bits_mask[0xd];
+	u8         event_mode[0x8];
+	u8         virtio_queue_type[0x8];
 
-	u8         reserved_at_20[0x13];
+	u8         max_tunnel_desc[0x10];
+	u8         reserved_at_30[0x3];
 	u8         log_doorbell_stride[0x5];
 	u8         reserved_at_38[0x3];
 	u8         log_doorbell_bar_size[0x5];
 
 	u8         doorbell_bar_offset[0x40];
 
-	u8         reserved_at_80[0x780];
+	u8         max_emulated_devices[0x8];
+	u8         max_num_virtio_queues[0x18];
+
+	u8         reserved_at_a0[0x60];
+
+	u8         umem_1_buffer_param_a[0x20];
+
+	u8         umem_1_buffer_param_b[0x20];
+
+	u8         umem_2_buffer_param_a[0x20];
+
+	u8         umem_2_buffer_param_b[0x20];
+
+	u8         umem_3_buffer_param_a[0x20];
+
+	u8         umem_3_buffer_param_b[0x20];
+
+	u8         reserved_at_1c0[0x640];
 };
 
 enum {
@@ -1220,7 +1244,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         max_sgl_for_optimized_performance[0x8];
 	u8         log_max_cq_sz[0x8];
-	u8         reserved_at_d0[0xb];
+	u8         relaxed_ordering_write_umr[0x1];
+	u8         relaxed_ordering_read_umr[0x1];
+	u8         reserved_at_d2[0x7];
+	u8         virtio_net_device_emualtion_manager[0x1];
+	u8         virtio_blk_device_emualtion_manager[0x1];
 	u8         log_max_cq[0x5];
 
 	u8         log_max_eq_sz[0x8];
@@ -1396,7 +1424,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8         bf[0x1];
 	u8         driver_version[0x1];
 	u8         pad_tx_eth_packet[0x1];
-	u8         reserved_at_263[0x8];
+	u8         reserved_at_263[0x3];
+	u8         mkey_by_name[0x1];
+	u8         reserved_at_267[0x4];
+
 	u8         log_bf_reg_size[0x5];
 
 	u8         reserved_at_270[0x8];
@@ -2953,7 +2984,7 @@ union mlx5_ifc_hca_cap_union_bits {
 	struct mlx5_ifc_fpga_cap_bits fpga_cap;
 	struct mlx5_ifc_tls_cap_bits tls_cap;
 	struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
-	struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
+	struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap;
 	u8         reserved_at_0[0x8000];
 };
 
@@ -3302,15 +3333,18 @@ struct mlx5_ifc_scheduling_context_bits {
 };
 
 struct mlx5_ifc_rqtc_bits {
-	u8         reserved_at_0[0xa0];
+	u8         reserved_at_0[0xa0];
 
-	u8         reserved_at_a0[0x10];
-	u8         rqt_max_size[0x10];
+	u8         reserved_at_a0[0x5];
+	u8         list_q_type[0x3];
+	u8         reserved_at_a8[0x8];
+	u8         rqt_max_size[0x10];
 
-	u8         reserved_at_c0[0x10];
-	u8         rqt_actual_size[0x10];
+	u8         rq_vhca_id_format[0x1];
+	u8         reserved_at_c1[0xf];
+	u8         rqt_actual_size[0x10];
 
-	u8         reserved_at_e0[0x6a0];
+	u8         reserved_at_e0[0x6a0];
 
 	struct mlx5_ifc_rq_num_bits rq_num[];
 };
@@ -7092,7 +7126,7 @@ struct mlx5_ifc_destroy_mkey_out_bits {
 struct mlx5_ifc_destroy_mkey_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
@@ -7723,8 +7757,10 @@ struct mlx5_ifc_create_qp_in_bits {
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
 
-	u8         reserved_at_40[0x40];
+	u8         reserved_at_40[0x8];
+	u8         input_qpn[0x18];
 
+	u8         reserved_at_60[0x20];
 	u8         opt_param_mask[0x20];
 
 	u8         ece[0x20];
@@ -7788,7 +7824,7 @@ struct mlx5_ifc_create_mkey_out_bits {
 struct mlx5_ifc_create_mkey_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
@@ -10346,6 +10382,40 @@ struct mlx5_ifc_create_umem_in_bits {
 	struct mlx5_ifc_umem_bits  umem;
 };
 
+struct mlx5_ifc_create_umem_out_bits
{ + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 umem_id[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_umem_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 umem_id[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_umem_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_create_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -10358,6 +10428,18 @@ struct mlx5_ifc_create_uctx_in_bits { struct mlx5_ifc_uctx_bits uctx; }; +struct mlx5_ifc_create_uctx_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 uid[0x10]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_destroy_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -10371,6 +10453,15 @@ struct mlx5_ifc_destroy_uctx_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_destroy_uctx_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_create_sw_icm_in_bits { struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; struct mlx5_ifc_sw_icm_bits sw_icm; @@ -10687,4 +10778,10 @@ struct mlx5_ifc_tls_progress_params_bits { u8 hw_offset_record_number[0x18]; }; +enum { + MLX5_MTT_PERM_READ = 1 << 0, + MLX5_MTT_PERM_WRITE = 1 << 1, + MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE, +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index dc7b87310c10..e7602a3bcef1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -38,6 +38,7 @@ struct file_ra_state; struct user_struct; struct writeback_control; struct bdi_writeback; +struct pt_regs; void init_mm_internals(void); @@ -206,6 +207,8 @@ int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, loff_t *); +int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) @@ -317,8 +320,6 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ -#elif defined(CONFIG_PPC) -# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 #elif defined(CONFIG_IA64) @@ -479,7 +480,7 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags) { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } /* - * vm_fault is filled by the the pagefault handler and passed to the vma's + * vm_fault is filled by the pagefault handler and passed to the vma's * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * @@ -779,6 +780,11 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) extern void kvfree(const void *addr); extern void kvfree_sensitive(const void *addr, size_t len); +static inline int head_mapcount(struct page *head) +{ + return atomic_read(compound_mapcount_ptr(head)) + 1; +} + /* * Mapcount of compound page as a whole, does not include mapped sub-pages. 
 *
@@ -788,7 +794,7 @@ static inline int compound_mapcount(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 	page = compound_head(page);
-	return atomic_read(compound_mapcount_ptr(page)) + 1;
+	return head_mapcount(page);
 }
 
 /*
@@ -901,11 +907,16 @@ static inline bool hpage_pincount_available(struct page *page)
 	return PageCompound(page) && compound_order(page) > 1;
 }
 
+static inline int head_pincount(struct page *head)
+{
+	return atomic_read(compound_pincount_ptr(head));
+}
+
 static inline int compound_pincount(struct page *page)
 {
 	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
 	page = compound_head(page);
-	return atomic_read(compound_pincount_ptr(page));
+	return head_pincount(page);
 }
 
 static inline void set_compound_order(struct page *page, unsigned int order)
@@ -1648,8 +1659,9 @@ int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
 extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
-			unsigned long address, unsigned int flags);
-extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
+			unsigned long address, unsigned int flags,
+			struct pt_regs *regs);
+extern int fixup_user_fault(struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
 void unmap_mapping_pages(struct address_space *mapping,
@@ -1658,14 +1670,14 @@ void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
 static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags)
+		unsigned long address, unsigned int flags,
+		struct pt_regs *regs)
 {
 	/* should never happen if there's no MMU */
 	BUG();
 	return VM_FAULT_SIGBUS;
 }
-static inline int fixup_user_fault(struct task_struct *tsk,
-		struct mm_struct *mm, unsigned long address,
+static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
 		unsigned int fault_flags, bool *unlocked)
 {
 	/* should never happen if there's no MMU */
@@ -1691,11 +1703,11 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long addr, void *buf, int len, unsigned int gup_flags);
 
-long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long get_user_pages_remote(struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas, int *locked);
-long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+long pin_user_pages_remote(struct mm_struct *mm,
 			   unsigned long start, unsigned long nr_pages,
 			   unsigned int gup_flags, struct page **pages,
 			   struct vm_area_struct **vmas, int *locked);
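With the task_struct argument gone, remote GUP callers pass only the mm; the per-task fault accounting that used to need tsk now flows through the new pt_regs argument of handle_mm_fault() (callers without register state can pass NULL). A minimal sketch of reading one page of another process's memory under the new signatures, assuming the caller already holds a reference on mm:

	struct page *page;
	long ret;

	mmap_read_lock(mm);	/* gup requires the mmap lock */
	ret = get_user_pages_remote(mm, addr, 1, 0, &page, NULL, NULL);
	mmap_read_unlock(mm);
	if (ret == 1) {
		/* ... kmap(page), copy out the data, kunmap(page) ... */
		put_page(page);
	}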
@@ -2093,51 +2105,11 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
 		NULL : pud_offset(p4d, address);
 }
 
-static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
-				     unsigned long address,
-				     pgtbl_mod_mask *mod_mask)
-
-{
-	if (unlikely(pgd_none(*pgd))) {
-		if (__p4d_alloc(mm, pgd, address))
-			return NULL;
-		*mod_mask |= PGTBL_PGD_MODIFIED;
-	}
-
-	return p4d_offset(pgd, address);
-}
-
-static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
-				     unsigned long address,
-				     pgtbl_mod_mask *mod_mask)
-{
-	if (unlikely(p4d_none(*p4d))) {
-		if (__pud_alloc(mm, p4d, address))
-			return NULL;
-		*mod_mask |= PGTBL_P4D_MODIFIED;
-	}
-
-	return pud_offset(p4d, address);
-}
-
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud,
 		unsigned long address)
 {
 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
 		NULL: pmd_offset(pud, address);
 }
-
-static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
-				     unsigned long address,
-				     pgtbl_mod_mask *mod_mask)
-{
-	if (unlikely(pud_none(*pud))) {
-		if (__pmd_alloc(mm, pud, address))
-			return NULL;
-		*mod_mask |= PGTBL_PUD_MODIFIED;
-	}
-
-	return pmd_offset(pud, address);
-}
 #endif /* CONFIG_MMU */
 
 #if USE_SPLIT_PTE_PTLOCKS
@@ -2253,11 +2225,6 @@ static inline void pgtable_pte_page_dtor(struct page *page)
 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
 		NULL: pte_offset_kernel(pmd, address))
 
-#define pte_alloc_kernel_track(pmd, address, mask)			\
-	((unlikely(pmd_none(*(pmd))) &&					\
-	  (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
-		NULL: pte_offset_kernel(pmd, address))
-
 #if USE_SPLIT_PMD_PTLOCKS
 
 static struct page *pmd_to_page(pmd_t *pmd)
@@ -2415,9 +2382,6 @@ static inline unsigned long get_num_physpages(void)
 * for_each_valid_physical_page_range()
 *	memblock_add_node(base, size, nid)
 * free_area_init(max_zone_pfns);
- *
- * sparse_memory_present_with_active_regions() calls memory_present() for
- * each range when SPARSEMEM is enabled.
 */
 void free_area_init(unsigned long *max_zone_pfn);
 unsigned long node_map_pfn_alignment(void);
@@ -2428,7 +2392,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
 extern void get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn);
 extern unsigned long find_min_pfn_with_active_regions(void);
-extern void sparse_memory_present_with_active_regions(int nid);
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 static inline int early_pfn_to_nid(unsigned long pfn)
@@ -2579,23 +2542,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct list_head *uf);
 extern unsigned long do_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot, unsigned long flags,
-	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf);
+	unsigned long pgoff, unsigned long *populate, struct list_head *uf);
 extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
 		       struct list_head *uf, bool downgrade);
 extern int do_munmap(struct mm_struct *, unsigned long, size_t,
 		     struct list_head *uf);
 extern int do_madvise(unsigned long start, size_t len_in, int behavior);
 
-static inline unsigned long
-do_mmap_pgoff(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot, unsigned long flags,
-	unsigned long pgoff, unsigned long *populate,
-	struct list_head *uf)
-{
-	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
-}
-
 #ifdef CONFIG_MMU
 extern int __mm_populate(unsigned long addr, unsigned long len,
 			 int ignore_errors);
@@ -2648,7 +2601,7 @@ extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
-/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
+/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
 extern int expand_downwards(struct vm_area_struct *vma,
 		unsigned long address);
 #if VM_GROWSUP
@@ -3011,14 +2964,15 @@ pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
 p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
 pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
-pte_t
*vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); +pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, + struct vmem_altmap *altmap); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; -void *vmemmap_alloc_block_buf(unsigned long size, int node); -void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); +void *vmemmap_alloc_block_buf(unsigned long size, int node, + struct vmem_altmap *altmap); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, - int node); + int node, struct vmem_altmap *altmap); int vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); void vmemmap_populate_print_last(void); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 64ede5f150dc..0277fbab7c93 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -198,7 +198,10 @@ struct page { atomic_t _refcount; #ifdef CONFIG_MEMCG - struct mem_cgroup *mem_cgroup; + union { + struct mem_cgroup *mem_cgroup; + struct obj_cgroup **obj_cgroups; + }; #endif /* diff --git a/include/linux/mman.h b/include/linux/mman.h index 4b08e9c9c538..6f34c33075f9 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -57,8 +57,12 @@ extern struct percpu_counter vm_committed_as; #ifdef CONFIG_SMP extern s32 vm_committed_as_batch; +extern void mm_compute_batch(int overcommit_policy); #else #define vm_committed_as_batch 0 +static inline void mm_compute_batch(int overcommit_policy) +{ +} #endif unsigned long vm_memory_committed(void); diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index c6f0708195cd..b8200782dede 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -521,6 +521,16 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, range->flags = flags; } +static inline void mmu_notifier_range_init_migrate( + struct mmu_notifier_range *range, unsigned int flags, + struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long start, unsigned long end, void *pgmap) +{ + mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm, + start, end); + range->migrate_pgmap_owner = pgmap; +} + #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ ({ \ int __young; \ @@ -645,6 +655,9 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ _mmu_notifier_range_init(range, start, end) +#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \ + pgmap) \ + _mmu_notifier_range_init(range, start, end) static inline bool mmu_notifier_range_blockable(const struct mmu_notifier_range *range) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index f6f884970511..8379432f4f2f 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -88,12 +88,10 @@ static inline bool is_migrate_movable(int mt) extern int page_group_by_mobility_disabled; -#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) -#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) +#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) #define get_pageblock_migratetype(page) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), \ - PB_migrate_end, MIGRATETYPE_MASK) + get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) struct free_area { struct list_head free_list[MIGRATE_TYPES]; @@ -155,10 +153,6 
@@ enum zone_stat_item {
 	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
 	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
 	NR_PAGETABLE,		/* used for pagetables */
-	NR_KERNEL_STACK_KB,	/* measured in KiB */
-#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
-	NR_KERNEL_SCS_KB,	/* measured in KiB */
-#endif
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
 #if IS_ENABLED(CONFIG_ZSMALLOC)
@@ -174,14 +168,20 @@ enum node_stat_item {
 	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
 	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
 	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
+	NR_SLAB_RECLAIMABLE_B,
+	NR_SLAB_UNRECLAIMABLE_B,
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	WORKINGSET_NODES,
-	WORKINGSET_REFAULT,
-	WORKINGSET_ACTIVATE,
-	WORKINGSET_RESTORE,
+	WORKINGSET_REFAULT_BASE,
+	WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE,
+	WORKINGSET_REFAULT_FILE,
+	WORKINGSET_ACTIVATE_BASE,
+	WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE,
+	WORKINGSET_ACTIVATE_FILE,
+	WORKINGSET_RESTORE_BASE,
+	WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE,
+	WORKINGSET_RESTORE_FILE,
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_MAPPED,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
@@ -203,10 +203,34 @@ enum node_stat_item {
 	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
 	NR_FOLL_PIN_ACQUIRED,	/* via: pin_user_page(), gup flag: FOLL_PIN */
 	NR_FOLL_PIN_RELEASED,	/* pages returned via unpin_user_page() */
+	NR_KERNEL_STACK_KB,	/* measured in KiB */
+#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
+	NR_KERNEL_SCS_KB,	/* measured in KiB */
+#endif
 	NR_VM_NODE_STAT_ITEMS
 };
 
 /*
+ * Returns true if the value is measured in bytes (most vmstat values are
+ * measured in pages). This defines the API part, the internal representation
+ * might be different.
+ */
+static __always_inline bool vmstat_item_in_bytes(int idx)
+{
+	/*
+	 * Global and per-node slab counters track slab pages.
+	 * It's expected that changes are multiples of PAGE_SIZE.
+	 * Internally values are stored in pages.
+	 *
+	 * Per-memcg and per-lruvec counters track memory, consumed
+	 * by individual slab objects. These counters are actually
+	 * byte-precise.
+	 */
+	return (idx == NR_SLAB_RECLAIMABLE_B ||
+		idx == NR_SLAB_UNRECLAIMABLE_B);
+}
+
+/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
@@ -259,8 +283,8 @@ struct lruvec {
 	unsigned long			file_cost;
 	/* Non-resident age, driven by LRU movement */
 	atomic_long_t			nonresident_age;
-	/* Refaults at the time of last reclaim cycle */
-	unsigned long			refaults;
+	/* Refaults at the time of last reclaim cycle, anon=0, file=1 */
+	unsigned long			refaults[2];
 	/* Various lruvec state flags (enum lruvec_flags) */
 	unsigned long			flags;
 #ifdef CONFIG_MEMCG
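The _B suffix introduced by vmstat_item_in_bytes() above matters at every call site: byte-based items take byte deltas, everything else still takes page counts. A short sketch using the modifier helpers from memcontrol.h (the values are illustrative; the real call sites live in the slab code):

	/* node + memcg slab counters, in bytes (multiples of PAGE_SIZE here) */
	mod_lruvec_state(lruvec, NR_SLAB_RECLAIMABLE_B, PAGE_SIZE);

	/* the per-memcg/per-lruvec side alone is byte-precise per object */
	mod_memcg_lruvec_state(lruvec, NR_SLAB_RECLAIMABLE_B, obj_size);

	/* page-based items are unchanged */
	mod_lruvec_state(lruvec, NR_ANON_MAPPED, 1);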
@@ -512,6 +536,7 @@ struct zone {
 	 * On compaction failure, 1<<compact_defer_shift compactions
 	 * are skipped before trying again. The number attempted since
 	 * last failure is tracked with compact_considered.
+	 * compact_order_failed is the lowest order at which compaction failed.
 	 */
 	unsigned int		compact_considered;
 	unsigned int		compact_defer_shift;
@@ -819,18 +844,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
 
 extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
 				     int zone_idx);
-#ifdef CONFIG_HAVE_MEMORY_PRESENT
-void memory_present(int nid, unsigned long start, unsigned long end);
-#else
-static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
-#endif
-
-#if defined(CONFIG_SPARSEMEM)
-void memblocks_present(void);
-#else
-static inline void memblocks_present(void) {}
-#endif
-
 #ifdef CONFIG_HAVE_MEMORYLESS_NODES
 int local_memory_node(int node_id);
 #else
@@ -1387,8 +1400,6 @@ struct mminit_pfnnid_cache {
 #define early_pfn_valid(pfn)	(1)
 #endif
 
-void memory_present(int nid, unsigned long start, unsigned long end);
-
 /*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
diff --git a/include/linux/module.h b/include/linux/module.h
index 2e6670860d27..e30ed5fa33a7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -389,6 +389,7 @@ struct module {
 	unsigned int num_gpl_syms;
 	const struct kernel_symbol *gpl_syms;
 	const s32 *gpl_crcs;
+	bool using_gplonly_symbols;
 
 #ifdef CONFIG_UNUSED_SYMBOLS
 	/* unused exported symbols. */
@@ -582,34 +583,14 @@ struct module *find_module(const char *name);
 struct symsearch {
 	const struct kernel_symbol *start, *stop;
 	const s32 *crcs;
-	enum {
+	enum mod_license {
 		NOT_GPL_ONLY,
 		GPL_ONLY,
 		WILL_BE_GPL_ONLY,
-	} licence;
+	} license;
 	bool unused;
 };
 
-/*
- * Search for an exported symbol by name.
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-const struct kernel_symbol *find_symbol(const char *name,
-					struct module **owner,
-					const s32 **crc,
-					bool gplok,
-					bool warn);
-
-/*
- * Walk the exported symbol table
- *
- * Must be called with module_mutex held or preemption disabled.
- */
-bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
-				    struct module *owner,
-				    void *data), void *data);
-
 /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
   symnum out of range. */
 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -657,7 +638,6 @@ static inline void __module_get(struct module *module)
 #define symbol_put_addr(p) do { } while (0)
 
 #endif /* CONFIG_MODULE_UNLOAD */
-int ref_module(struct module *a, struct module *b);
 
 /* This is a #define so the string doesn't get put in every .o file */
 #define module_name(mod)			\
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 3ef917ff0964..1ad5aa3b86d9 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -108,7 +108,7 @@ struct kparam_array
 * ".") the kernel commandline parameter. Note that - is changed to _, so
 * the user can use "foo-bar=1" even for variable "foo_bar".
 *
- * @perm is 0 if the the variable is not to appear in sysfs, or 0444
+ * @perm is 0 if the variable is not to appear in sysfs, or 0444
 * for world-readable, 0644 for root-writable, etc. Note that if it
 * is writable, you may need to use kernel_param_lock() around
 * accesses (esp. charp, which can be kfreed when it changes).
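As the @perm note warns, a writable charp parameter can be kfreed by a concurrent sysfs write, so readers must hold the param lock. A minimal sketch with hypothetical names:

	static char *mode = "auto";
	module_param(mode, charp, 0644);	/* world-readable, root-writable */
	MODULE_PARM_DESC(mode, "operating mode");

	/* reader side: take the param lock so a concurrent write
	 * cannot kfree() the string underneath us */
	kernel_param_lock(THIS_MODULE);
	pr_info("mode=%s\n", mode);
	kernel_param_unlock(THIS_MODULE);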
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h index 2dfe65964f6e..2129f7d3b6eb 100644 --- a/include/linux/mtd/hyperbus.h +++ b/include/linux/mtd/hyperbus.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __LINUX_MTD_HYPERBUS_H__ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 0c7483843a32..af99041ceaa9 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -12,6 +12,8 @@ #include <linux/mtd/mtd.h> +struct nand_device; + /** * struct nand_memory_organization - Memory organization structure * @bits_per_cell: number of bits per NAND cell @@ -114,11 +116,11 @@ struct nand_page_io_req { }; /** - * struct nand_ecc_req - NAND ECC requirements + * struct nand_ecc_props - NAND ECC properties * @strength: ECC strength - * @step_size: ECC step/block size + * @step_size: Number of bytes per step */ -struct nand_ecc_req { +struct nand_ecc_props { unsigned int strength; unsigned int step_size; }; @@ -133,8 +135,6 @@ struct nand_bbt { unsigned long *cache; }; -struct nand_device; - /** * struct nand_ops - NAND operations * @erase: erase a specific block. No need to check if the block is bad before @@ -179,7 +179,7 @@ struct nand_ops { struct nand_device { struct mtd_info mtd; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct nand_row_converter rowconv; struct nand_bbt bbt; const struct nand_ops *ops; diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index 122f3439e1af..6166e7c60869 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h @@ -19,7 +19,7 @@ /* Identification info for LPDDR chip */ #define PFOW_MANUFACTURER_ID 0x0020 #define PFOW_DEVICE_ID 0x0022 -/* Address in PFOW where prog buffer can can be found */ +/* Address in PFOW where prog buffer can be found */ #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 /* Size of program buffer in words */ #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 65b1c1c18b41..a725b620aca2 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -492,22 +492,22 @@ struct nand_sdr_timings { }; /** - * enum nand_data_interface_type - NAND interface timing type + * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface */ -enum nand_data_interface_type { +enum nand_interface_type { NAND_SDR_IFACE, }; /** - * struct nand_data_interface - NAND interface timing + * struct nand_interface_config - NAND interface timing * @type: type of the timing * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. 
 */
-struct nand_data_interface {
-	enum nand_data_interface_type type;
+struct nand_interface_config {
+	enum nand_interface_type type;
 	struct nand_timings {
 		unsigned int mode;
 		union {
@@ -521,7 +521,7 @@ struct nand_data_interface {
 * @conf:	The data interface
 */
 static inline const struct nand_sdr_timings *
-nand_get_sdr_timings(const struct nand_data_interface *conf)
+nand_get_sdr_timings(const struct nand_interface_config *conf)
 {
 	if (conf->type != NAND_SDR_IFACE)
 		return ERR_PTR(-EINVAL);
@@ -944,11 +944,10 @@ static inline void nand_op_trace(const char *prefix,
 *			This method replaces chip->legacy.cmdfunc(),
 *			chip->legacy.{read,write}_{buf,byte,word}(),
 *			chip->legacy.dev_ready() and chip->legacy.waitfunc().
- * @setup_data_interface: setup the data interface and timing. If
- *			  chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- *			  means the configuration should not be applied but
- *			  only checked.
- *			  This hook is optional.
+ * @setup_interface:	setup the data interface and timing. If chipnr is set to
+ *			%NAND_DATA_IFACE_CHECK_ONLY this means the configuration
+ *			should not be applied but only checked.
+ *			This hook is optional.
 */
 struct nand_controller_ops {
 	int (*attach_chip)(struct nand_chip *chip);
@@ -956,8 +955,8 @@ struct nand_controller_ops {
 	int (*exec_op)(struct nand_chip *chip,
 		       const struct nand_operation *op,
 		       bool check_only);
-	int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
-				    const struct nand_data_interface *conf);
+	int (*setup_interface)(struct nand_chip *chip, int chipnr,
+			       const struct nand_interface_config *conf);
 };
 
 /**
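A controller driver implements the renamed hook once and uses the chipnr sentinel to distinguish "validate only" from "apply". A rough sketch, with the timing check purely illustrative:

	static int my_nfc_setup_interface(struct nand_chip *chip, int chipnr,
					  const struct nand_interface_config *conf)
	{
		const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

		if (IS_ERR(sdr))
			return PTR_ERR(sdr);

		if (sdr->tRC_min < 30000)	/* hypothetical controller limit, in ps */
			return -EOPNOTSUPP;

		if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
			return 0;	/* only validating, do not touch registers */

		/* ... program the controller's timing registers here ... */
		return 0;
	}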
@@ -1028,140 +1027,138 @@ struct nand_legacy {
 };
 
 /**
- * struct nand_chip - NAND Private Flash Chip Data
- * @base:		Inherit from the generic NAND device
- * @legacy:		All legacy fields/hooks. If you develop a new driver,
- *			don't even try to use any of these fields/hooks, and if
- *			you're modifying an existing driver that is using those
- *			fields/hooks, you should consider reworking the driver
- *			avoid using them.
- * @setup_read_retry:	[FLASHSPECIFIC] flash (vendor) specific function for
- *			setting the read-retry mode. Mostly needed for MLC NAND.
- * @ecc:		[BOARDSPECIFIC] ECC control structure
- * @buf_align:		minimum buffer alignment required by a platform
- * @oob_poi:		"poison value buffer," used for laying out OOB data
- *			before writing
- * @page_shift:		[INTERN] number of address bits in a page (column
- *			address bits).
- * @phys_erase_shift:	[INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift:	[INTERN] number of address bits in a bbt entry
- * @chip_shift:		[INTERN] number of address bits in one chip
- * @options:		[BOARDSPECIFIC] various chip options. They can partly
- *			be set to inform nand_scan about special functionality.
- *			See the defines for further explanation.
- * @bbt_options:	[INTERN] bad block specific options. All options used
- *			here must come from bbm.h. By default, these options
- *			will be copied to the appropriate nand_bbt_descr's.
- * @badblockpos:	[INTERN] position of the bad block marker in the oob
- *			area.
- * @badblockbits:	[INTERN] minimum number of set bits in a good block's
- *			bad block marker position; i.e., BBM == 11110111b is
- *			not bad when badblockbits == 7
- * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
- *			      set to the actually used ONFI mode if the chip is
- *			      ONFI compliant or deduced from the datasheet if
- *			      the NAND chip is not ONFI compliant.
- * @pagemask:		[INTERN] page number mask = number of (pages / chip) - 1
- * @data_buf:		[INTERN] buffer for data, size is (page size + oobsize).
- * @pagecache:		Structure containing page cache related fields
- * @pagecache.bitflips:	Number of bitflips of the cached page
- * @pagecache.page:	Page number currently in the cache. -1 means no page is
- *			currently cached
- * @subpagesize:	[INTERN] holds the subpagesize
- * @id:			[INTERN] holds NAND ID
- * @parameters:		[INTERN] holds generic parameters under an easily
- *			readable form.
- * @data_interface:	[INTERN] NAND interface timing information
- * @cur_cs:		currently selected target. -1 means no target selected,
- *			otherwise we should always have cur_cs >= 0 &&
- *			cur_cs < nanddev_ntargets(). NAND Controller drivers
- *			should not modify this value, but they're allowed to
- *			read it.
- * @read_retries:	[INTERN] the number of read retry modes supported
- * @lock:		lock protecting the suspended field. Also used to
- *			serialize accesses to the NAND device.
- * @suspended:		set to 1 when the device is suspended, 0 when it's not.
- * @suspend:		[REPLACEABLE] specific NAND device suspend operation
- * @resume:		[REPLACEABLE] specific NAND device resume operation
- * @bbt:		[INTERN] bad block table pointer
- * @bbt_td:		[REPLACEABLE] bad block table descriptor for flash
- *			lookup.
- * @bbt_md:		[REPLACEABLE] bad block table mirror descriptor
- * @badblock_pattern:	[REPLACEABLE] bad block scan pattern used for initial
- *			bad block scan.
- * @controller:		[REPLACEABLE] a pointer to a hardware controller
- *			structure which is shared among multiple independent
- *			devices.
- * @priv:		[OPTIONAL] pointer to private chip data
- * @manufacturer:	[INTERN] Contains manufacturer information
- * @manufacturer.desc:	[INTERN] Contains manufacturer's description
- * @manufacturer.priv:	[INTERN] Contains manufacturer private information
- * @lock_area:		[REPLACEABLE] specific NAND chip lock operation
- * @unlock_area:	[REPLACEABLE] specific NAND chip unlock operation
+ * struct nand_chip_ops - NAND chip operations
+ * @suspend: Suspend operation
+ * @resume: Resume operation
+ * @lock_area: Lock operation
+ * @unlock_area: Unlock operation
+ * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
+ * @choose_interface_config: Choose the best interface configuration
+ */
+struct nand_chip_ops {
+	int (*suspend)(struct nand_chip *chip);
+	void (*resume)(struct nand_chip *chip);
+	int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+	int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+	int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+	int (*choose_interface_config)(struct nand_chip *chip,
+				       struct nand_interface_config *iface);
+};
+
+/**
+ * struct nand_manufacturer - NAND manufacturer structure
+ * @desc: The manufacturer description
+ * @priv: Private information for the manufacturer driver
 */
+struct nand_manufacturer {
+	const struct nand_manufacturer_desc *desc;
+	void *priv;
+};
 
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @base: Inherit from the generic NAND device
+ * @id: Holds NAND ID
+ * @parameters: Holds generic parameters under an easily readable form
+ * @manufacturer: Manufacturer information
+ * @ops: NAND chip operations
+ * @legacy: All legacy fields/hooks.
If you develop a new driver, don't even try
+ *	    to use any of these fields/hooks, and if you're modifying an
+ *	    existing driver that is using those fields/hooks, you should
+ *	    consider reworking the driver and avoid using them.
+ * @options: Various chip options. They can partly be set to inform nand_scan
+ *	     about special functionality. See the defines for further
+ *	     explanation.
+ * @current_interface_config: The currently used NAND interface configuration
+ * @best_interface_config: The best NAND interface configuration which fits both
+ *                         the NAND chip and NAND controller constraints. If
+ *                         unset, the default reset interface configuration must
+ *                         be used.
+ * @bbt_erase_shift: Number of address bits in a bbt entry
+ * @bbt_options: Bad block table specific options. All options used here must
+ *               come from bbm.h. By default, these options will be copied to
+ *               the appropriate nand_bbt_descr's.
+ * @badblockpos: Bad block marker position in the oob area
+ * @badblockbits: Minimum number of set bits in a good block's bad block marker
+ *                position; i.e., BBM = 11110111b is good when badblockbits = 7
+ * @bbt_td: Bad block table descriptor for flash lookup
+ * @bbt_md: Bad block table mirror descriptor
+ * @badblock_pattern: Bad block scan pattern used for initial bad block scan
+ * @bbt: Bad block table pointer
+ * @page_shift: Number of address bits in a page (column address bits)
+ * @phys_erase_shift: Number of address bits in a physical eraseblock
+ * @chip_shift: Number of address bits in one chip
+ * @pagemask: Page number mask = number of (pages / chip) - 1
+ * @subpagesize: Holds the subpagesize
+ * @data_buf: Buffer for data, size is (page size + oobsize)
+ * @oob_poi: Pointer to the OOB area covered by data_buf
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ *                  currently cached
+ * @buf_align: Minimum buffer alignment required by a platform
+ * @lock: Lock protecting the suspended field. Also used to serialize accesses
+ *        to the NAND device
+ * @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
+ *          should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
+ *          NAND Controller drivers should not modify this value, but they're
+ *          allowed to read it.
+ * @read_retries: The number of read retry modes supported + * @controller: The hardware controller structure which is shared among multiple + * independent devices + * @ecc: The ECC controller structure + * @priv: Chip private data + */ struct nand_chip { struct nand_device base; - + struct nand_id id; + struct nand_parameters parameters; + struct nand_manufacturer manufacturer; + struct nand_chip_ops ops; struct nand_legacy legacy; + unsigned int options; - int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); + /* Data interface */ + const struct nand_interface_config *current_interface_config; + struct nand_interface_config *best_interface_config; - unsigned int options; + /* Bad block information */ + unsigned int bbt_erase_shift; unsigned int bbt_options; + unsigned int badblockpos; + unsigned int badblockbits; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + struct nand_bbt_descr *badblock_pattern; + u8 *bbt; - int page_shift; - int phys_erase_shift; - int bbt_erase_shift; - int chip_shift; - int pagemask; - u8 *data_buf; + /* Device internal layout */ + unsigned int page_shift; + unsigned int phys_erase_shift; + unsigned int chip_shift; + unsigned int pagemask; + unsigned int subpagesize; + /* Buffers */ + u8 *data_buf; + u8 *oob_poi; struct { unsigned int bitflips; int page; } pagecache; + unsigned long buf_align; - int subpagesize; - int onfi_timing_mode_default; - unsigned int badblockpos; - int badblockbits; - - struct nand_id id; - struct nand_parameters parameters; - - struct nand_data_interface data_interface; - - int cur_cs; - - int read_retries; - + /* Internals */ struct mutex lock; unsigned int suspended : 1; - int (*suspend)(struct nand_chip *chip); - void (*resume)(struct nand_chip *chip); + int cur_cs; + int read_retries; - uint8_t *oob_poi; + /* Externals */ struct nand_controller *controller; - struct nand_ecc_ctrl ecc; - unsigned long buf_align; - - uint8_t *bbt; - struct nand_bbt_descr *bbt_td; - struct nand_bbt_descr *bbt_md; - - struct nand_bbt_descr *badblock_pattern; - void *priv; - - struct { - const struct nand_manufacturer *desc; - void *priv; - } manufacturer; - - int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); - int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; @@ -1209,6 +1206,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) return mtd_get_of_node(nand_to_mtd(chip)); } +/** + * nand_get_interface_config - Retrieve the current interface configuration + * of a NAND chip + * @chip: The NAND chip + */ +static inline const struct nand_interface_config * +nand_get_interface_config(struct nand_chip *chip) +{ + return chip->current_interface_config; +} + /* * A helper for defining older NAND chips where the second ID byte fully * defined the chip, including the geometry (chip size, eraseblock size, page @@ -1261,10 +1269,6 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) * @ecc_step_ds in nand_chip{}, also from the datasheet. * For example, the "4bit ECC for each 512Byte" can be set with * NAND_ECC_INFO(4, 512). - * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND - * reset. Should be deduced from timings described - * in the datasheet. 
- *
 */
 struct nand_flash_dev {
 	char *name;
@@ -1285,7 +1289,6 @@ struct nand_flash_dev {
 		uint16_t strength_ds;
 		uint16_t step_ds;
 	} ecc;
-	int onfi_timing_mode_default;
 };
 
 int nand_create_bbt(struct nand_chip *chip);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 1077c45721ff..7b78c4ba9b3e 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -309,7 +309,7 @@ struct spinand_info {
 	struct spinand_devid devid;
 	u32 flags;
 	struct nand_memory_organization memorg;
-	struct nand_ecc_req eccreq;
+	struct nand_ecc_props eccreq;
 	struct spinand_ecc_info eccinfo;
 	struct {
 		const struct spinand_op_variants *read_cache;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ae197cc00cc8..dcd185cbfe79 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -65,6 +65,17 @@ struct mutex {
 #endif
 };
 
+struct ww_class;
+struct ww_acquire_ctx;
+
+struct ww_mutex {
+	struct mutex base;
+	struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
 /*
 * This is the control structure for tasks blocked on mutex,
 * which resides on the blocked task's kernel stack:
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 4dba3c948932..33ebe476428e 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -150,6 +150,12 @@ enum nfs_opnum4 {
 	OP_WRITE_SAME = 70,
 	OP_CLONE = 71,
 
+	/* xattr support (RFC8276) */
+	OP_GETXATTR = 72,
+	OP_SETXATTR = 73,
+	OP_LISTXATTRS = 74,
+	OP_REMOVEXATTR = 75,
+
 	OP_ILLEGAL = 10044,
 };
 
@@ -159,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/
 #define FIRST_NFS4_OP	OP_ACCESS
 #define LAST_NFS40_OP	OP_RELEASE_LOCKOWNER
 #define LAST_NFS41_OP	OP_RECLAIM_COMPLETE
-#define LAST_NFS42_OP	OP_CLONE
+#define LAST_NFS42_OP	OP_REMOVEXATTR
 #define LAST_NFS4_OP	LAST_NFS42_OP
 
 enum nfsstat4 {
@@ -280,6 +286,10 @@ enum nfsstat4 {
 	NFS4ERR_WRONG_LFS = 10092,
 	NFS4ERR_BADLABEL = 10093,
 	NFS4ERR_OFFLOAD_NO_REQS = 10094,
+
+	/* xattr (RFC8276) */
+	NFS4ERR_NOXATTR = 10095,
+	NFS4ERR_XATTR2BIG = 10096,
 };
 
 static inline bool seqid_mutating_err(u32 err)
@@ -452,6 +462,7 @@ enum change_attr_type4 {
 #define FATTR4_WORD2_CHANGE_ATTR_TYPE	(1UL << 15)
 #define FATTR4_WORD2_SECURITY_LABEL	(1UL << 16)
 #define FATTR4_WORD2_MODE_UMASK		(1UL << 17)
+#define FATTR4_WORD2_XATTR_SUPPORT	(1UL << 18)
 
 /* MDS threshold bitmap bits */
 #define THRESHOLD_RD	(1UL << 0)
@@ -700,4 +711,13 @@ struct nl4_server {
 		struct nfs42_netaddr	nl4_addr; /* NL4_NETADDR */
 	} u;
 };
+
+/*
+ * Options for setxattr. These match the flags for setxattr(2).
+ */
+enum nfs4_setxattr_options {
+	SETXATTR4_EITHER = 0,
+	SETXATTR4_CREATE = 1,
+	SETXATTR4_REPLACE = 2,
+};
 #endif
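The new enum mirrors the userspace setxattr(2) flags one-to-one, so the client can translate them directly. A small sketch of that mapping (a hypothetical helper, using XATTR_CREATE/XATTR_REPLACE from linux/xattr.h):

	/* hypothetical helper: pick the NFSv4.2 on-the-wire option */
	static enum nfs4_setxattr_options nfs4_xattr_opt(int flags)
	{
		if (flags & XATTR_CREATE)
			return SETXATTR4_CREATE;	/* fail if it already exists */
		if (flags & XATTR_REPLACE)
			return SETXATTR4_REPLACE;	/* fail if it does not exist */
		return SETXATTR4_EITHER;		/* create or replace */
	}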
diff --git a/include/linux/oom.h b/include/linux/oom.h
index c696c265f019..f022f581ac29 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -48,7 +48,7 @@ struct oom_control {
 	/* Used by oom implementation, do not set */
 	unsigned long totalpages;
 	struct task_struct *chosen;
-	unsigned long chosen_points;
+	long chosen_points;
 
 	/* Used to print the constraint info. */
 	enum oom_constraint constraint;
@@ -107,7 +107,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
 
 bool __oom_reap_task_mm(struct mm_struct *mm);
 
-extern unsigned long oom_badness(struct task_struct *p,
+long oom_badness(struct task_struct *p,
 		unsigned long totalpages);
 
 extern bool out_of_memory(struct oom_control *oc);
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index c066fec5b74b..fff52ad370c1 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -56,35 +56,25 @@ struct page;
 
 unsigned long get_pfnblock_flags_mask(struct page *page,
 				unsigned long pfn,
-				unsigned long end_bitidx,
 				unsigned long mask);
 
 void set_pfnblock_flags_mask(struct page *page,
 				unsigned long flags,
 				unsigned long pfn,
-				unsigned long end_bitidx,
 				unsigned long mask);
 
 /* Declarations for getting and setting flags. See mm/page_alloc.c */
-#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
-	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
-			end_bitidx,					\
-			(1 << (end_bitidx - start_bitidx + 1)) - 1)
-#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
-	set_pfnblock_flags_mask(page, flags, page_to_pfn(page),		\
-			end_bitidx,					\
-			(1 << (end_bitidx - start_bitidx + 1)) - 1)
-
 #ifdef CONFIG_COMPACTION
 #define get_pageblock_skip(page) \
-	get_pageblock_flags_group(page, PB_migrate_skip,	\
-			PB_migrate_skip)
+	get_pfnblock_flags_mask(page, page_to_pfn(page),	\
+			(1 << (PB_migrate_skip)))
 #define clear_pageblock_skip(page) \
-	set_pageblock_flags_group(page, 0, PB_migrate_skip,	\
-			PB_migrate_skip)
+	set_pfnblock_flags_mask(page, 0, page_to_pfn(page),	\
+			(1 << PB_migrate_skip))
 #define set_pageblock_skip(page) \
-	set_pageblock_flags_group(page, 1, PB_migrate_skip,	\
-			PB_migrate_skip)
+	set_pfnblock_flags_mask(page, (1 << PB_migrate_skip),	\
+			page_to_pfn(page),			\
+			(1 << PB_migrate_skip))
 #else
 static inline bool get_pageblock_skip(struct page *page)
 {
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index f75c307f346d..df54cd5b15db 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -28,6 +28,10 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
 void pci_disable_pri(struct pci_dev *pdev);
 int pci_reset_pri(struct pci_dev *pdev);
 int pci_prg_resp_pasid_required(struct pci_dev *pdev);
+bool pci_pri_supported(struct pci_dev *pdev);
+#else
+static inline bool pci_pri_supported(struct pci_dev *pdev)
+{ return false; }
 #endif /* CONFIG_PCI_PRI */
 
 #ifdef CONFIG_PCI_PASID
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 34c1c4f45288..835530605c0d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -179,7 +179,7 @@ static inline const char *pci_power_name(pci_power_t state)
 */
 typedef unsigned int __bitwise pci_channel_state_t;
 
-enum pci_channel_state {
+enum {
 	/* I/O channel is in normal state */
 	pci_channel_io_normal = (__force pci_channel_state_t) 1,
 
@@ -432,6 +432,12 @@ struct pci_dev {
 	 * mappings to make sure they cannot access arbitrary memory.
 	 */
 	unsigned int	untrusted:1;
+	/*
+	 * Info from the platform, e.g., ACPI or device tree, may mark a
+	 * device as "external-facing". An external-facing device is
+	 * itself internal but devices downstream from it are external.
+ */ + unsigned int external_facing:1; unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; @@ -486,6 +492,7 @@ struct pci_dev { #ifdef CONFIG_PCI_P2PDMA struct pci_p2pdma *p2pdma; #endif + u16 acs_cap; /* ACS Capability offset */ phys_addr_t rom; /* Physical address if not from BAR */ size_t romlen; /* Length if not from BAR */ char *driver_override; /* Driver name to force a match */ @@ -785,7 +792,7 @@ enum pci_ers_result { struct pci_error_handlers { /* PCI bus error detected on this device */ pci_ers_result_t (*error_detected)(struct pci_dev *dev, - enum pci_channel_state error); + pci_channel_state_t error); /* MMIO has been re-enabled, but not DMA */ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); @@ -1053,13 +1060,6 @@ void pci_sort_breadthfirst(void); /* Generic PCI functions exported to card drivers */ -enum pci_lost_interrupt_reason { - PCI_LOST_IRQ_NO_INFORMATION = 0, - PCI_LOST_IRQ_DISABLE_MSI, - PCI_LOST_IRQ_DISABLE_MSIX, - PCI_LOST_IRQ_DISABLE_ACPI, -}; -enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); int pci_find_capability(struct pci_dev *dev, int cap); int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); int pci_find_ext_capability(struct pci_dev *dev, int cap); @@ -2302,10 +2302,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, struct device_node; struct irq_domain; struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); -int pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range); /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); @@ -2313,14 +2309,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); #else /* CONFIG_OF */ static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } -static inline int -pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range) -{ - return -EINVAL; -} #endif /* CONFIG_OF */ static inline struct device_node * diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0ad57693f392..1ab1e24bcbce 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2585,6 +2585,8 @@ #define PCI_VENDOR_ID_ASMEDIA 0x1b21 +#define PCI_VENDOR_ID_REDHAT 0x1b36 + #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 @@ -2659,6 +2661,8 @@ #define PCI_DEVICE_ID_INTEL_80332_1 0x0332 #define PCI_DEVICE_ID_INTEL_80333_0 0x0370 #define PCI_DEVICE_ID_INTEL_80333_1 0x0372 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 @@ -2708,6 +2712,8 @@ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 @@ -2924,6 +2930,8 @@ #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 
+#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8 +#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 0a4f54dd4737..01861eebed79 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -44,6 +44,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); +void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { @@ -172,6 +173,9 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc) return true; } +static inline void percpu_counter_sync(struct percpu_counter *fbc) +{ +} #endif /* CONFIG_SMP */ static inline void percpu_counter_inc(struct percpu_counter *fbc) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 56c1e8eb7bb0..a124c21e3204 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -647,40 +647,6 @@ static inline int arch_unmap_one(struct mm_struct *mm, #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif -#ifndef pgprot_nx -#define pgprot_nx(prot) (prot) -#endif - -#ifndef pgprot_noncached -#define pgprot_noncached(prot) (prot) -#endif - -#ifndef pgprot_writecombine -#define pgprot_writecombine pgprot_noncached -#endif - -#ifndef pgprot_writethrough -#define pgprot_writethrough pgprot_noncached -#endif - -#ifndef pgprot_device -#define pgprot_device pgprot_noncached -#endif - -#ifndef pgprot_modify -#define pgprot_modify pgprot_modify -static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) -{ - if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) - newprot = pgprot_noncached(newprot); - if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) - newprot = pgprot_writecombine(newprot); - if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) - newprot = pgprot_device(newprot); - return newprot; -} -#endif - /* * When walking page tables, get the address of the next boundary, * or the end address of the range if that comes earlier. Although no @@ -838,8 +804,45 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, /* * No-op macros that just return the current protection value. Defined here - * because these macros can be used used even if CONFIG_MMU is not defined. + * because these macros can be used even if CONFIG_MMU is not defined. 
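The percpu_counter_sync() interface declared above (with a no-op stub for !CONFIG_SMP) lends itself to a hedged usage sketch; the caller is invented:

#include <linux/percpu_counter.h>

/*
 * Fold this CPU's batched delta into fbc->count so that the cheap
 * percpu_counter_read() below reflects local updates. Deltas still
 * batched on other CPUs remain outstanding.
 */
static s64 example_read_after_local_sync(struct percpu_counter *fbc)
{
	percpu_counter_sync(fbc);
	return percpu_counter_read(fbc);
}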
*/ + +#ifndef pgprot_nx +#define pgprot_nx(prot) (prot) +#endif + +#ifndef pgprot_noncached +#define pgprot_noncached(prot) (prot) +#endif + +#ifndef pgprot_writecombine +#define pgprot_writecombine pgprot_noncached +#endif + +#ifndef pgprot_writethrough +#define pgprot_writethrough pgprot_noncached +#endif + +#ifndef pgprot_device +#define pgprot_device pgprot_noncached +#endif + +#ifdef CONFIG_MMU +#ifndef pgprot_modify +#define pgprot_modify pgprot_modify +static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +{ + if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot))) + newprot = pgprot_noncached(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot))) + newprot = pgprot_writecombine(newprot); + if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot))) + newprot = pgprot_device(newprot); + return newprot; +} +#endif +#endif /* CONFIG_MMU */ + #ifndef pgprot_encrypted #define pgprot_encrypted(prot) (prot) #endif @@ -1231,7 +1234,7 @@ static inline int pmd_trans_unstable(pmd_t *pmd) * Technically a PTE can be PROTNONE even when not doing NUMA balancing but * the only case the kernel cares is for NUMA balancing and is only ever set * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked - * _PAGE_PROTNONE so by by default, implement the helper as "always no". It + * _PAGE_PROTNONE so by default, implement the helper as "always no". It * is the responsibility of the caller to distinguish between PROT_NONE * protections and NUMA hinting fault protections. */ @@ -1315,10 +1318,10 @@ static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) /* * ARCHes with special requirements for evicting THP backing TLB entries can * implement this. Otherwise also, it can help optimize normal TLB flush in - * THP regime. stock flush_tlb_range() typically has optimization to nuke the - * entire TLB TLB if flush span is greater than a threshold, which will - * likely be true for a single huge page. Thus a single thp flush will - * invalidate the entire TLB which is not desitable. + * THP regime. Stock flush_tlb_range() typically has optimization to nuke the + * entire TLB if flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single THP flush will + * invalidate the entire TLB which is not desirable. * e.g. 
see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 91e77f53414d..1fcfe9e63cb9 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4917,15 +4917,26 @@ struct ec_response_usb_pd_control_v1 { #define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ #define USBC_PD_CC_DFP_ATTACHED 5 /* DPF attached to usbc */ +/* Active/Passive Cable */ +#define USB_PD_CTRL_ACTIVE_CABLE BIT(0) +/* Optical/Non-optical cable */ +#define USB_PD_CTRL_OPTICAL_CABLE BIT(1) +/* 3rd Gen TBT device (or AMA)/2nd gen tbt Adapter */ +#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2) +/* Active Link Uni-Direction */ +#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3) + struct ec_response_usb_pd_control_v2 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; - uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ - uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ - /* CL:1500994 Current cable type */ - uint8_t reserved_cable_type; + uint8_t cc_state; /* enum pd_cc_states representing cc state */ + uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ + uint8_t reserved; /* Reserved for future use */ + uint8_t control_flags; /* USB_PD_CTRL_*flags */ + uint8_t cable_speed; /* TBT_SS_* cable speed */ + uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */ } __ec_align1; #define EC_CMD_USB_PD_PORTS 0x0102 @@ -5207,11 +5218,15 @@ struct ec_params_usb_pd_mux_info { } __ec_align1; /* Flags representing mux state */ -#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ -#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ -#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ -#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ -#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_NONE 0 /* Open switch */ +#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ +#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ +#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ +#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ +#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */ +#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */ +#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 383243326676..4a415ae851ef 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -216,9 +216,6 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h index 3fbf9f2793b5..bc208c64e3d7 100644 --- a/include/linux/platform_data/davinci-cpufreq.h +++ b/include/linux/platform_data/davinci-cpufreq.h @@ -2,7 +2,7 @@ /* * TI DaVinci CPUFreq platform support. 
* - * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments, Inc. https://www.ti.com/ */ #ifndef _MACH_DAVINCI_CPUFREQ_H diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 7fe80f1c7e08..5d1fb0d78a22 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -1,7 +1,7 @@ /* * TI DaVinci Audio Serial Port support * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index f3eaf9ec00a1..fbbeb2f6189b 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -8,10 +8,15 @@ #ifndef _PLATFORM_DATA_DMA_DW_H #define _PLATFORM_DATA_DMA_DW_H -#include <linux/device.h> +#include <linux/bits.h> +#include <linux/types.h> #define DW_DMA_MAX_NR_MASTERS 4 #define DW_DMA_MAX_NR_CHANNELS 8 +#define DW_DMA_MIN_BURST 1 +#define DW_DMA_MAX_BURST 256 + +struct device; /** * struct dw_dma_slave - Controller-specific information about a slave @@ -42,6 +47,8 @@ struct dw_dma_slave { * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. + * @max_burst: Maximum value of burst transaction size supported by hardware + * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH). * @protctl: Protection control signals setting per channel. */ struct dw_dma_platform_data { @@ -56,6 +63,7 @@ struct dw_dma_platform_data { unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; + u32 max_burst[DW_DMA_MAX_NR_CHANNELS]; #define CHAN_PROTCTL_PRIVILEGED BIT(0) #define CHAN_PROTCTL_BUFFERABLE BIT(1) #define CHAN_PROTCTL_CACHEABLE BIT(2) diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 0f491d8abfdd..3cc78f0447b1 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h @@ -2,7 +2,7 @@ /* * BCH Error Location Module * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __ELM_H diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index a93841bfb9f7..e182a46e609f 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -1,7 +1,7 @@ /* * DaVinci GPIO Platform Related Defines * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h index ef663e570552..c9cc4e32435d 100644 --- a/include/linux/platform_data/gpmc-omap.h +++ b/include/linux/platform_data/gpmc-omap.h @@ -2,7 +2,7 @@ /* * OMAP GPMC Platform data * - * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Copyright (C) 2014 Texas Instruments, Inc. 
- https://www.ti.com * Roger Quadros <rogerq@ti.com> */ diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h deleted file mode 100644 index 386439db68de..000000000000 --- a/include/linux/platform_data/media/omap1_camera.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface - * - * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl> - */ - -#ifndef __MEDIA_OMAP1_CAMERA_H_ -#define __MEDIA_OMAP1_CAMERA_H_ - -#include <linux/bitops.h> - -#define OMAP1_CAMERA_IOSIZE 0x1c - -enum omap1_cam_vb_mode { - OMAP1_CAM_DMA_CONTIG = 0, - OMAP1_CAM_DMA_SG, -}; - -#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2) - -struct omap1_cam_platform_data { - unsigned long camexclk_khz; - unsigned long lclk_khz_max; - unsigned long flags; -}; - -#define OMAP1_CAMERA_LCLK_RISING BIT(0) -#define OMAP1_CAMERA_RST_LOW BIT(1) -#define OMAP1_CAMERA_RST_HIGH BIT(2) - -#endif /* __MEDIA_OMAP1_CAMERA_H_ */ diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index 9cffa9a64ab3..1af9c01563f9 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -43,10 +43,13 @@ * * TYPE1 HW watchdog implementation exist in old systems. * All new systems have TYPE2 HW watchdog. + * TYPE3 HW watchdog can exist on all systems with new CPLD. + * TYPE3 is selected by WD capability bit. */ enum mlxreg_wdt_type { MLX_WDT_TYPE1, MLX_WDT_TYPE2, + MLX_WDT_TYPE3, }; /** @@ -93,7 +96,7 @@ struct mlxreg_core_data { umode_t mode; struct device_node *np; struct mlxreg_hotplug_device hpdev; - u8 health_cntr; + u32 health_cntr; bool attached; u8 regnum; }; diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h index a403dd51dacc..a49826214a39 100644 --- a/include/linux/platform_data/mtd-davinci-aemif.h +++ b/include/linux/platform_data/mtd-davinci-aemif.h @@ -1,7 +1,7 @@ /* * TI DaVinci AEMIF support * - * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/ + * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/ * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h index 8419c8caf54e..0dd851ea1c72 100644 --- a/include/linux/platform_data/omap-twl4030.h +++ b/include/linux/platform_data/omap-twl4030.h @@ -3,7 +3,7 @@ * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030 * codec, header. * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * All rights reserved. * * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h deleted file mode 100644 index 02653d92d84f..000000000000 --- a/include/linux/platform_data/sky81452-backlight.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * sky81452.h SKY81452 backlight driver - * - * Copyright 2014 Skyworks Solutions Inc. - * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com> - */ - -#ifndef _SKY81452_BACKLIGHT_H -#define _SKY81452_BACKLIGHT_H - -/** - * struct sky81452_platform_data - * @name: backlight driver name. 
- If it is not defined, default name is lcd-backlight. - * @gpio_enable:GPIO number which control EN pin - * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. - * @ignore_pwm: true if DPWMI should be ignored. - * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. - * @phase_shift:true is phase shift mode. - * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V. - * @boost_current_limit: It should be one of 2300, 2750mA. - */ -struct sky81452_bl_platform_data { - const char *name; - int gpio_enable; - unsigned int enable; - bool ignore_pwm; - bool dpwm_mode; - bool phase_shift; - unsigned int short_detection_threshold; - unsigned int boost_current_limit; -}; - -#endif diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h index 3d47d219827f..31f2e22661bc 100644 --- a/include/linux/platform_data/uio_pruss.h +++ b/include/linux/platform_data/uio_pruss.h @@ -3,7 +3,7 @@ * * Platform data for uio_pruss driver * - * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h index fa579b4c666b..5e70d667031c 100644 --- a/include/linux/platform_data/usb-omap.h +++ b/include/linux/platform_data/usb-omap.h @@ -1,7 +1,7 @@ /* * usb-omap.h - Platform data for the various OMAP USB IPs * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index ee34c553f6bf..dbb484524f82 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -152,6 +152,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); @@ -343,6 +344,11 @@ static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_f return -ENOTSUPP; } +static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) +{ + return -EOPNOTSUPP; +} + static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { return -ENOTSUPP; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 3dbc207bff53..6245caa18034 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -60,58 +60,151 @@ extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device *dev); +/** + * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. + * @dev: Target device. 
+ * + * Increment the runtime PM usage counter of @dev if its runtime PM status is + * %RPM_ACTIVE and its runtime PM usage counter is greater than 0. + */ static inline int pm_runtime_get_if_in_use(struct device *dev) { return pm_runtime_get_if_active(dev, false); } +/** + * pm_suspend_ignore_children - Set runtime PM behavior regarding children. + * @dev: Target device. + * @enable: Whether or not to ignore possible dependencies on children. + * + * The dependencies of @dev on its children will not be taken into account by + * the runtime PM framework going forward if @enable is %true, or they will + * be taken into account otherwise. + */ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; } +/** + * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device. + * @dev: Target device. + */ static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); } +/** + * pm_runtime_put_noidle - Drop runtime PM usage counter of a device. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev unless it is 0 already. + */ static inline void pm_runtime_put_noidle(struct device *dev) { atomic_add_unless(&dev->power.usage_count, -1, 0); } +/** + * pm_runtime_suspended - Check whether or not a device is runtime-suspended. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * %RPM_SUSPENDED, or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev and its runtime PM + * status cannot change. + */ static inline bool pm_runtime_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED && !dev->power.disable_depth; } +/** + * pm_runtime_active - Check whether or not a device is runtime-active. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * %RPM_ACTIVE, or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev and its runtime PM + * status cannot change. + */ static inline bool pm_runtime_active(struct device *dev) { return dev->power.runtime_status == RPM_ACTIVE || dev->power.disable_depth; } +/** + * pm_runtime_status_suspended - Check if runtime PM status is "suspended". + * @dev: Target device. + * + * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false + * otherwise, regardless of whether or not runtime PM has been enabled for @dev. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which the + * runtime PM status of @dev cannot change. + */ static inline bool pm_runtime_status_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED; } +/** + * pm_runtime_enabled - Check if runtime PM is enabled. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev. 
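A hedged sketch of the counter-only helpers documented above; "foo" is an invented driver, and the point is that neither call starts a resume, suspend, or idle check by itself:

static void foo_no_suspend_section(struct device *dev)
{
	pm_runtime_get_noresume(dev);	/* usage counter++, no resume */

	/* while the counter is non-zero, runtime suspend cannot win */

	pm_runtime_put_noidle(dev);	/* usage counter--, no idle check */
}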
+ */ static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; } +/** + * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present. + * @dev: Target device. + * + * Return %true if @dev is a special device without runtime PM callbacks or + * %false otherwise. + */ static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return dev->power.no_callbacks; } +/** + * pm_runtime_mark_last_busy - Update the last access time of a device. + * @dev: Target device. + * + * Update the last access time of @dev used by the runtime PM autosuspend + * mechanism to the current time as returned by ktime_get_mono_fast_ns(). + */ static inline void pm_runtime_mark_last_busy(struct device *dev) { WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); } +/** + * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context. + * @dev: Target device. + * + * Return %true if @dev has been marked as an "IRQ-safe" device (with respect + * to runtime PM), in which case its runtime PM callbacks can be expected to + * work correctly when invoked from interrupt handlers. + */ static inline bool pm_runtime_is_irq_safe(struct device *dev) { return dev->power.irq_safe; @@ -191,97 +284,250 @@ static inline void pm_runtime_drop_link(struct device *dev) {} #endif /* !CONFIG_PM */ +/** + * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it. + * @dev: Target device. + * + * Invoke the "idle check" callback of @dev and, depending on its return value, + * set up autosuspend of @dev or suspend it (depending on whether or not + * autosuspend has been enabled for it). + */ static inline int pm_runtime_idle(struct device *dev) { return __pm_runtime_idle(dev, 0); } +/** + * pm_runtime_suspend - Suspend a device synchronously. + * @dev: Target device. + */ static inline int pm_runtime_suspend(struct device *dev) { return __pm_runtime_suspend(dev, 0); } +/** + * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it. + * @dev: Target device. + * + * Set up autosuspend of @dev or suspend it (depending on whether or not + * autosuspend is enabled for it) without engaging its "idle check" callback. + */ static inline int pm_runtime_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_AUTO); } +/** + * pm_runtime_resume - Resume a device synchronously. + * @dev: Target device. + */ static inline int pm_runtime_resume(struct device *dev) { return __pm_runtime_resume(dev, 0); } +/** + * pm_request_idle - Queue up "idle check" execution for a device. + * @dev: Target device. + * + * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev + * asynchronously. + */ static inline int pm_request_idle(struct device *dev) { return __pm_runtime_idle(dev, RPM_ASYNC); } +/** + * pm_request_resume - Queue up runtime-resume of a device. + * @dev: Target device. + */ static inline int pm_request_resume(struct device *dev) { return __pm_runtime_resume(dev, RPM_ASYNC); } +/** + * pm_request_autosuspend - Queue up autosuspend of a device. + * @dev: Target device. + * + * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for @dev + * asynchronously. + */ static inline int pm_request_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); } +/** + * pm_runtime_get - Bump up usage counter and queue up resume of a device. + * @dev: Target device.
+ * + * Bump up the runtime PM usage counter of @dev and queue up a work item to + * carry out runtime-resume of it. + */ static inline int pm_runtime_get(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); } +/** + * pm_runtime_get_sync - Bump up usage counter of a device and resume it. + * @dev: Target device. + * + * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of + * it synchronously. + * + * The possible return values of this function are the same as for + * pm_runtime_resume() and the runtime PM usage counter of @dev remains + * incremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_get_sync(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT); } +/** + * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, queue up a work item for @dev like in pm_request_idle(). + */ static inline int pm_runtime_put(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); } +/** + * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). + */ static inline int pm_runtime_put_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); } +/** + * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, invoke the "idle check" callback of @dev and, depending on its + * return value, set up autosuspend of @dev or suspend it (depending on whether + * or not autosuspend has been enabled for it). + * + * The possible return values of this function are the same as for + * pm_runtime_idle() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_put_sync(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT); } +/** + * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, carry out runtime-suspend of @dev synchronously. + * + * The possible return values of this function are the same as for + * pm_runtime_suspend() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_put_sync_suspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT); } +/** + * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending + * on whether or not autosuspend has been enabled for it). + * + * The possible return values of this function are the same as for + * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. 
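The note above, that the usage counter stays incremented even when resume fails, is what drives the usual calling pattern. A hedged sketch (driver and function names invented):

static int foo_xfer(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the counter was bumped even though resume failed */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... access the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}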
+ */ static inline int pm_runtime_put_sync_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); } +/** + * pm_runtime_set_active - Set runtime PM status to "active". + * @dev: Target device. + * + * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies + * of it will be taken into account. + * + * It is not valid to call this function for devices with runtime PM enabled. + */ static inline int pm_runtime_set_active(struct device *dev) { return __pm_runtime_set_status(dev, RPM_ACTIVE); } +/** + * pm_runtime_set_suspended - Set runtime PM status to "suspended". + * @dev: Target device. + * + * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that + * dependencies of it will be taken into account. + * + * It is not valid to call this function for devices with runtime PM enabled. + */ static inline int pm_runtime_set_suspended(struct device *dev) { return __pm_runtime_set_status(dev, RPM_SUSPENDED); } +/** + * pm_runtime_disable - Disable runtime PM for a device. + * @dev: Target device. + * + * Prevent the runtime PM framework from working with @dev (by incrementing its + * "blocking" counter). + * + * For each invocation of this function for @dev there must be a matching + * pm_runtime_enable() call in order for runtime PM to be enabled for it. + */ static inline void pm_runtime_disable(struct device *dev) { __pm_runtime_disable(dev, true); } +/** + * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device. + * @dev: Target device. + * + * Allow the runtime PM autosuspend mechanism to be used for @dev whenever + * requested (or "autosuspend" will be handled as direct runtime-suspend for + * it). + */ static inline void pm_runtime_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, true); } +/** + * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used. + * @dev: Target device. + * + * Prevent the runtime PM autosuspend mechanism from being used for @dev which + * means that "autosuspend" will be handled as direct runtime-suspend for it + * going forward. + */ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, false); diff --git a/include/linux/poison.h b/include/linux/poison.h index df34330b4e34..dc8ae5d8db03 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -24,10 +24,6 @@ #define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ -/* - * Magic number "tsta" to indicate a static timer initializer - * for the object debugging code. - */ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/page_poison.c **********/ diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h index 4ca08321e251..f3c267f2a467 100644 --- a/include/linux/power/bq2415x_charger.h +++ b/include/linux/power/bq2415x_charger.h @@ -14,8 +14,8 @@ * value is -1 then default chip value (specified in datasheet) will be * used. * - * Value resistor_sense is needed for for configuring charge and - * termination current. It it is less or equal to zero, configuring charge + * Value resistor_sense is needed for configuring charge and + * termination current. If it is less than or equal to zero, configuring charge * and termination current will not be possible.
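Tying the pm_runtime setters documented above together, a hedged probe-time sketch: pm_runtime_set_active() is only valid while runtime PM is still disabled, which dictates the ordering, and pm_runtime_set_autosuspend_delay() is an existing helper, not part of this diff:

static int foo_probe(struct device *dev)
{
	/* firmware left the device powered up */
	pm_runtime_set_active(dev);

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* milliseconds */

	pm_runtime_enable(dev);	/* pairs with pm_runtime_disable() */
	return 0;
}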
* * For automode support is needed to provide name of power supply device diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 507c5e214c42..987d9652aa4e 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -30,6 +30,8 @@ enum bq27xxx_chip { BQ27426, BQ27441, BQ27621, + BQ27Z561, + BQ28Z610, }; struct bq27xxx_device_info; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index ac1345a48ad0..97cc4b85bf61 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -48,6 +48,7 @@ enum { POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */ POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */ POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */ + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */ }; enum { @@ -62,6 +63,9 @@ enum { POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_OVERCURRENT, POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED, + POWER_SUPPLY_HEALTH_WARM, + POWER_SUPPLY_HEALTH_COOL, + POWER_SUPPLY_HEALTH_HOT, }; enum { diff --git a/include/linux/property.h b/include/linux/property.h index 10d03572f52e..9f805c442819 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -389,6 +389,11 @@ struct fwnode_handle * fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, u32 endpoint); +static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode) +{ + return fwnode_property_present(fwnode, "remote-endpoint"); +} + /* * Fwnode lookup flags * diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 3d6a24697761..2e1193a3fb5f 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -44,6 +44,13 @@ enum qcom_scm_sec_dev_id { QCOM_SCM_ICE_DEV_ID = 20, }; +enum qcom_scm_ice_cipher { + QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0, + QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1, + QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3, + QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4, +}; + #define QCOM_SCM_VMID_HLOS 0x3 #define QCOM_SCM_VMID_MSS_MSA 0xF #define QCOM_SCM_VMID_WLAN 0x18 @@ -88,6 +95,12 @@ extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size); +extern bool qcom_scm_ice_available(void); +extern int qcom_scm_ice_invalidate_key(u32 index); +extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, + u32 data_unit_size); + extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); @@ -138,6 +151,12 @@ static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) { return -ENODEV; } +static inline bool qcom_scm_ice_available(void) { return false; } +static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; } +static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, + u32 data_unit_size) { return -ENODEV; } + static inline bool qcom_scm_hdcp_available(void) { return false; } static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) { return -ENODEV; } diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h index 37dd3f40cd31..1f029a71c3ef 100644 --- a/include/linux/raid/detect.h +++ b/include/linux/raid/detect.h @@ -1,3 +1,11 @@ /* 
SPDX-License-Identifier: GPL-2.0 */ void md_autodetect_dev(dev_t dev); + +#ifdef CONFIG_BLK_DEV_MD +void md_run_setup(void); +#else +static inline void md_run_setup(void) +{ +} +#endif diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h deleted file mode 100644 index 8dfec085a20e..000000000000 --- a/include/linux/raid/md_u.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - md_u.h : user <=> kernel API between Linux raidtools and RAID drivers - Copyright (C) 1998 Ingo Molnar - -*/ -#ifndef _MD_U_H -#define _MD_U_H - -#include <uapi/linux/raid/md_u.h> - -extern int mdp_major; -#endif diff --git a/include/linux/regset.h b/include/linux/regset.h index 46d6ae68c455..c3403f328257 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -17,6 +17,52 @@ struct task_struct; struct user_regset; +struct membuf { + void *p; + size_t left; +}; + +static inline int membuf_zero(struct membuf *s, size_t size) +{ + if (s->left) { + if (size > s->left) + size = s->left; + memset(s->p, 0, size); + s->p += size; + s->left -= size; + } + return s->left; +} + +static inline int membuf_write(struct membuf *s, const void *v, size_t size) +{ + if (s->left) { + if (size > s->left) + size = s->left; + memcpy(s->p, v, size); + s->p += size; + s->left -= size; + } + return s->left; +} + +/* current s->p must be aligned for v; v must be a scalar */ +#define membuf_store(s, v) \ +({ \ + struct membuf *__s = (s); \ + if (__s->left) { \ + typeof(v) __v = (v); \ + size_t __size = sizeof(__v); \ + if (unlikely(__size > __s->left)) { \ + __size = __s->left; \ + memcpy(__s->p, &__v, __size); \ + } else { \ + *(typeof(__v + 0) *)__s->p = __v; \ + } \ + __s->p += __size; \ + __s->left -= __size; \ + } \ + __s->left;}) /** * user_regset_active_fn - type of @active function in &struct user_regset @@ -36,26 +82,9 @@ struct user_regset; typedef int user_regset_active_fn(struct task_struct *target, const struct user_regset *regset); -/** - * user_regset_get_fn - type of @get function in &struct user_regset - * @target: thread being examined - * @regset: regset being examined - * @pos: offset into the regset data to access, in bytes - * @count: amount of data to copy, in bytes - * @kbuf: if not %NULL, a kernel-space pointer to copy into - * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into - * - * Fetch register values. Return %0 on success; -%EIO or -%ENODEV - * are usual failure returns. The @pos and @count values are in - * bytes, but must be properly aligned. If @kbuf is non-null, that - * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then - * ubuf gives a userland pointer to access directly, and an -%EFAULT - * return value is possible. - */ -typedef int user_regset_get_fn(struct task_struct *target, +typedef int user_regset_get2_fn(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf); + struct membuf to); /** * user_regset_set_fn - type of @set function in &struct user_regset @@ -104,28 +133,6 @@ typedef int user_regset_writeback_fn(struct task_struct *target, int immediate); /** - * user_regset_get_size_fn - type of @get_size function in &struct user_regset - * @target: thread being examined - * @regset: regset being examined - * - * This call is optional; usually the pointer is %NULL. - * - * When provided, this function must return the current size of regset - * data, as observed by the @get function in &struct user_regset. 
The - * value returned must be a multiple of @size. The returned size is - * required to be valid only until the next time (if any) @regset is - * modified for @target. - * - * This function is intended for dynamically sized regsets. A regset - * that is statically sized does not need to implement it. - * - * This function should not be called directly: instead, callers should - * call regset_size() to determine the current size of a regset. - */ -typedef unsigned int user_regset_get_size_fn(struct task_struct *target, - const struct user_regset *regset); - -/** * struct user_regset - accessible thread CPU state * @n: Number of slots (registers). * @size: Size in bytes of a slot (register). @@ -136,7 +143,6 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * @set: Function to store values. * @active: Function to report if regset is active, or %NULL. * @writeback: Function to write data back to user memory, or %NULL. - * @get_size: Function to return the regset's size, or %NULL. * * This data structure describes a machine resource we call a register set. * This is part of the state of an individual thread, not necessarily @@ -144,12 +150,7 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * similar slots, given by @n. Each slot is @size bytes, and aligned to * @align bytes (which is at least @size). For dynamically-sized * regsets, @n must contain the maximum possible number of slots for the - * regset, and @get_size must point to a function that returns the - * current regset size. - * - * Callers that need to know only the current size of the regset and do - * not care about its internal structure should call regset_size() - * instead of inspecting @n or calling @get_size. + * regset. * * For backward compatibility, the @get and @set methods must pad to, or * accept, @n * @size bytes, even if the current regset size is smaller. @@ -185,11 +186,10 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * omitted when there is an @active function and it returns zero. */ struct user_regset { - user_regset_get_fn *get; + user_regset_get2_fn *regset_get; user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; - user_regset_get_size_fn *get_size; unsigned int n; unsigned int size; unsigned int align; @@ -238,44 +238,6 @@ struct user_regset_view { */ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); - -/* - * These are helpers for writing regset get/set functions in arch code. - * Because @start_pos and @end_pos are always compile-time constants, - * these are inlined into very little code though they look large. - * - * Use one or more calls sequentially for each chunk of regset data stored - * contiguously in memory. Call with constants for @start_pos and @end_pos, - * giving the range of byte positions in the regset that data corresponds - * to; @end_pos can be -1 if this chunk is at the end of the regset layout. - * Each call updates the arguments to point past its chunk. - */ - -static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, - void **kbuf, - void __user **ubuf, const void *data, - const int start_pos, const int end_pos) -{ - if (*count == 0) - return 0; - BUG_ON(*pos < start_pos); - if (end_pos < 0 || *pos < end_pos) { - unsigned int copy = (end_pos < 0 ? 
*count - : min(*count, end_pos - *pos)); - data += *pos - start_pos; - if (*kbuf) { - memcpy(*kbuf, data, copy); - *kbuf += copy; - } else if (__copy_to_user(*ubuf, data, copy)) - return -EFAULT; - else - *ubuf += copy; - *pos += copy; - *count -= copy; - } - return 0; -} - static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, const void **kbuf, const void __user **ubuf, void *data, @@ -301,35 +263,6 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, return 0; } -/* - * These two parallel the two above, but for portions of a regset layout - * that always read as all-zero or for which writes are ignored. - */ -static inline int user_regset_copyout_zero(unsigned int *pos, - unsigned int *count, - void **kbuf, void __user **ubuf, - const int start_pos, - const int end_pos) -{ - if (*count == 0) - return 0; - BUG_ON(*pos < start_pos); - if (end_pos < 0 || *pos < end_pos) { - unsigned int copy = (end_pos < 0 ? *count - : min(*count, end_pos - *pos)); - if (*kbuf) { - memset(*kbuf, 0, copy); - *kbuf += copy; - } else if (clear_user(*ubuf, copy)) - return -EFAULT; - else - *ubuf += copy; - *pos += copy; - *count -= copy; - } - return 0; -} - static inline int user_regset_copyin_ignore(unsigned int *pos, unsigned int *count, const void **kbuf, @@ -353,31 +286,19 @@ static inline int user_regset_copyin_ignore(unsigned int *pos, return 0; } -/** - * copy_regset_to_user - fetch a thread's user_regset data into user memory - * @target: thread to be examined - * @view: &struct user_regset_view describing user thread machine state - * @setno: index in @view->regsets - * @offset: offset into the regset data, in bytes - * @size: amount of data to copy, in bytes - * @data: user-mode pointer to copy into - */ -static inline int copy_regset_to_user(struct task_struct *target, - const struct user_regset_view *view, - unsigned int setno, - unsigned int offset, unsigned int size, - void __user *data) -{ - const struct user_regset *regset = &view->regsets[setno]; - - if (!regset->get) - return -EOPNOTSUPP; +extern int regset_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, void *data); - if (!access_ok(data, size)) - return -EFAULT; +extern int regset_get_alloc(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, + void **data); - return regset->get(target, regset, offset, size, NULL, data); -} +extern int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, unsigned int offset, + unsigned int size, void __user *data); /** * copy_regset_from_user - store into thread's user_regset data from user memory @@ -405,21 +326,4 @@ static inline int copy_regset_from_user(struct task_struct *target, return regset->set(target, regset, offset, size, NULL, data); } -/** - * regset_size - determine the current size of a regset - * @target: thread to be examined - * @regset: regset to be examined - * - * Note that the returned size is valid only until the next time - * (if any) @regset is modified for @target. 
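The copyout helpers removed above are exactly what the new membuf interface replaces. A hedged sketch of a ->regset_get() implementation in the new style ("foo" and the thread field are invented):

static int foo_regset_get(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf to)
{
	struct foo_regs *regs = &target->thread.foo;	/* hypothetical */

	membuf_write(&to, regs, sizeof(*regs));

	/* pad the remainder of the slot; returns the space left (0 here) */
	return membuf_zero(&to, to.left);
}

There is no pos/count bookkeeping and no kbuf/ubuf split any more; copying out to user space is handled by the regset core.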
- */ -static inline unsigned int regset_size(struct task_struct *target, - const struct user_regset *regset) -{ - if (!regset->get_size) - return regset->n * regset->size; - else - return regset->get_size(target, regset); -} - #endif /* <linux/regset.h> */ diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index e7b7bab8b235..2fa68bf5aa4f 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -38,6 +38,7 @@ #include <linux/types.h> #include <linux/mutex.h> #include <linux/virtio.h> +#include <linux/cdev.h> #include <linux/completion.h> #include <linux/idr.h> #include <linux/of.h> @@ -359,6 +360,7 @@ enum rsc_handling_status { * @unprepare: unprepare device after stop * @start: power on the device and boot it * @stop: power off the device + * @attach: attach to a device that is already powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) @@ -379,6 +381,7 @@ struct rproc_ops { int (*unprepare)(struct rproc *rproc); int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); + int (*attach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); @@ -400,6 +403,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_DETACHED: device has been booted by another entity and waiting + * for the core to attach to it * @RPROC_LAST: just keep this one at the end * * Please note that the values of these states are used as indices @@ -414,7 +419,8 @@ enum rproc_state { RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_LAST = 5, + RPROC_DETACHED = 5, + RPROC_LAST = 6, }; /** @@ -435,6 +441,20 @@ enum rproc_crash_type { }; /** + * enum rproc_dump_mechanism - Coredump options for core + * @RPROC_COREDUMP_DEFAULT: Copy dump to separate buffer and carry on with + * recovery + * @RPROC_COREDUMP_INLINE: Read segments directly from device memory.
Stall + * recovery until all segments are read + * @RPROC_COREDUMP_DISABLED: Don't perform any dump + */ +enum rproc_dump_mechanism { + RPROC_COREDUMP_DEFAULT, + RPROC_COREDUMP_INLINE, + RPROC_COREDUMP_DISABLED, +}; + +/** * struct rproc_dump_segment - segment info from ELF header * @node: list node related to the rproc segment list * @da: device address of the segment @@ -451,7 +471,7 @@ struct rproc_dump_segment { void *priv; void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest); + void *dest, size_t offset, size_t size); loff_t offset; }; @@ -466,6 +486,7 @@ struct rproc_dump_segment { * @dev: virtual device for refcounting and common remoteproc behavior * @power: refcount of users who need this rproc powered up * @state: state of the device + * @dump_conf: Currently selected coredump configuration * @lock: lock which protects concurrent manipulations of the rproc * @dbg_dir: debugfs directory of this rproc device * @traces: list of trace buffers @@ -486,8 +507,11 @@ struct rproc_dump_segment { * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started + * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc + * @char_dev: character device of the rproc + * @cdev_put_on_release: flag to indicate if remoteproc should be shut down on @char_dev release */ struct rproc { struct list_head node; @@ -499,6 +523,7 @@ struct rproc { struct device dev; atomic_t power; unsigned int state; + enum rproc_dump_mechanism dump_conf; struct mutex lock; struct dentry *dbg_dir; struct list_head traces; @@ -519,10 +544,13 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; + bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; u16 elf_machine; + struct cdev cdev; + bool cdev_put_on_release; }; /** @@ -603,6 +631,7 @@ void rproc_put(struct rproc *rproc); int rproc_add(struct rproc *rproc); int rproc_del(struct rproc *rproc); void rproc_free(struct rproc *rproc); +void rproc_resource_cleanup(struct rproc *rproc); struct rproc *devm_rproc_alloc(struct device *dev, const char *name, const struct rproc_ops *ops, @@ -630,7 +659,8 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, dma_addr_t da, size_t size, void (*dumpfn)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest), + void *dest, size_t offset, + size_t size), void *priv); int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine); diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h deleted file mode 100644 index 0820edc0ab7d..000000000000 --- a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* Copyright (C) 2019 Linaro Ltd.
*/ - -#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__ -#define __QCOM_Q6V5_IPA_NOTIFY_H__ - -#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) - -#include <linux/remoteproc.h> - -enum qcom_rproc_event { - MODEM_STARTING = 0, /* Modem is about to be started */ - MODEM_RUNNING = 1, /* Startup complete; modem is operational */ - MODEM_STOPPING = 2, /* Modem is about to shut down */ - MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */ - MODEM_OFFLINE = 4, /* Modem is now offline */ - MODEM_REMOVING = 5, /* Modem is about to be removed */ -}; - -typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event); - -struct qcom_rproc_ipa_notify { - struct rproc_subdev subdev; - - qcom_ipa_notify_t notify; - void *data; -}; - -/** - * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * Register the @ipa_notify subdevice with the @rproc so modem events - * can be sent to IPA when they occur. - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_add_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_remove_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_register_ipa_notify() - Register IPA notification function - * @rproc: Remote processor handle - * @notify: Non-null IPA notification callback function pointer - * @data: Data supplied to IPA notification callback function - * - * @Return: 0 if successful, or a negative error code otherwise - * - * This is defined in "qcom_q6v5_mss.c". - */ -int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, - void *data); -/** - * qcom_deregister_ipa_notify() - Deregister IPA notification function - * @rproc: Remote processor handle - * - * This is defined in "qcom_q6v5_mss.c". - */ -void qcom_deregister_ipa_notify(struct rproc *rproc); - -#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -struct qcom_rproc_ipa_notify { /* empty */ }; - -#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ -#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ - -#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */ diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h index fa8e38681b4b..647051662174 100644 --- a/include/linux/remoteproc/qcom_rproc.h +++ b/include/linux/remoteproc/qcom_rproc.h @@ -5,17 +5,43 @@ struct notifier_block; #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) -int qcom_register_ssr_notifier(struct notifier_block *nb); -void qcom_unregister_ssr_notifier(struct notifier_block *nb); +/** + * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc + * processor. 
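Before the per-event documentation below, a hedged sketch of the reworked SSR API: registration is now per-remoteproc by name and yields an opaque handle for unregistration. That the notifier receives the event type in @action and a &struct qcom_ssr_notify_data pointer in @data is an assumption consistent with the types above, not something this header spells out:

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/remoteproc/qcom_rproc.h>

static int foo_ssr_notify(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct qcom_ssr_notify_data *ssr = data;

	if (action == QCOM_SSR_AFTER_SHUTDOWN && ssr->crashed)
		pr_err("remoteproc %s crashed\n", ssr->name);

	return NOTIFY_OK;
}

static struct notifier_block foo_nb = { .notifier_call = foo_ssr_notify };
static void *foo_ssr_handle;

static int foo_register(void)
{
	/* "mpss" is an illustrative remoteproc name */
	foo_ssr_handle = qcom_register_ssr_notifier("mpss", &foo_nb);
	if (IS_ERR_OR_NULL(foo_ssr_handle))
		return -ENODEV;
	return 0;
}

static void foo_unregister(void)
{
	qcom_unregister_ssr_notifier(foo_ssr_handle, &foo_nb);
}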
+ * + * @QCOM_SSR_BEFORE_POWERUP: Remoteproc about to start (prepare stage) + * @QCOM_SSR_AFTER_POWERUP: Remoteproc is running (start stage) + * @QCOM_SSR_BEFORE_SHUTDOWN: Remoteproc crashed or shutting down (stop stage) + * @QCOM_SSR_AFTER_SHUTDOWN: Remoteproc is down (unprepare stage) + */ +enum qcom_ssr_notify_type { + QCOM_SSR_BEFORE_POWERUP, + QCOM_SSR_AFTER_POWERUP, + QCOM_SSR_BEFORE_SHUTDOWN, + QCOM_SSR_AFTER_SHUTDOWN, +}; + +struct qcom_ssr_notify_data { + const char *name; + bool crashed; +}; + +void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb); +int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb); #else -static inline int qcom_register_ssr_notifier(struct notifier_block *nb) +static inline void *qcom_register_ssr_notifier(const char *name, + struct notifier_block *nb) { - return 0; + return NULL; } -static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {} +static inline int qcom_unregister_ssr_notifier(void *notify, + struct notifier_block *nb) +{ + return 0; +} #endif diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index c76b2f3b3ac4..136ea0997e6d 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -143,6 +143,7 @@ bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_reset_online_cpus(struct trace_buffer *buffer); void ring_buffer_reset(struct trace_buffer *buffer); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP diff --git a/include/linux/rmi.h b/include/linux/rmi.h index 7b22366d0065..8ed37f93f3c8 100644 --- a/include/linux/rmi.h +++ b/include/linux/rmi.h @@ -206,7 +206,7 @@ struct rmi_device_platform_data_spi { * * @reset_delay_ms - after issuing a reset command to the touch sensor, the * driver waits a few milliseconds to give the firmware a chance to - * to re-initialize. You can override the default wait period here. + * re-initialize. You can override the default wait period here. * @irq: irq associated with the attn gpio line, or negative */ struct rmi_device_platform_data { diff --git a/include/linux/rtc.h b/include/linux/rtc.h index bba3db3f7efa..22d1575e4991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -55,10 +55,6 @@ extern struct class *rtc_class; * * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs - * - non-ioctl() chardev hooks: open(), release() - * - * REVISIT those periodic irq calls *do* have ops_lock when they're - * issued through ioctl() ... 
*/ struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); diff --git a/include/linux/sched.h b/include/linux/sched.h index 06ec60462af0..53ddc02e2e79 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -32,6 +32,7 @@ #include <linux/task_io_accounting.h> #include <linux/posix-timers.h> #include <linux/rseq.h> +#include <linux/seqlock.h> #include <linux/kcsan.h> /* task_struct member predeclarations (sorted alphabetically): */ @@ -1049,7 +1050,7 @@ struct task_struct { /* Protected by ->alloc_lock: */ nodemask_t mems_allowed; /* Sequence number to catch updates: */ - seqcount_t mems_allowed_seq; + seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; #endif @@ -1648,6 +1649,9 @@ extern int idle_cpu(int cpu); extern int available_idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern void sched_set_fifo(struct task_struct *p); +extern void sched_set_fifo_low(struct task_struct *p); +extern void sched_set_normal(struct task_struct *p, int nice); extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 6be66f52a2ad..f889e332912f 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -175,24 +175,20 @@ static inline bool in_vfork(struct task_struct *tsk) * Applies per-task gfp context to the given allocation flags. * PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS - * PF_MEMALLOC_NOCMA implies no allocation from CMA region. 
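The new sched_set_fifo() family declared above replaces open-coded sched_setscheduler(SCHED_FIFO, ...) calls, letting the kernel pick the priority. A minimal sketch of a driver thread using it (the thread and function names are illustrative):

    #include <linux/delay.h>
    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int rt_worker_fn(void *data)
    {
            while (!kthread_should_stop())
                    msleep(100);    /* placeholder for real work */
            return 0;
    }

    static int start_rt_worker(void)
    {
            struct task_struct *tsk;

            tsk = kthread_create(rt_worker_fn, NULL, "rt-worker");
            if (IS_ERR(tsk))
                    return PTR_ERR(tsk);

            /* SCHED_FIFO at a kernel-chosen priority; no magic numbers */
            sched_set_fifo(tsk);
            wake_up_process(tsk);
            return 0;
    }

sched_set_fifo_low() requests the lowest FIFO priority instead, and sched_set_normal() drops a task back to SCHED_NORMAL with the given nice value.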
*/ static inline gfp_t current_gfp_context(gfp_t flags) { - if (unlikely(current->flags & - (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) { + unsigned int pflags = READ_ONCE(current->flags); + + if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it takes precedence */ - if (current->flags & PF_MEMALLOC_NOIO) + if (pflags & PF_MEMALLOC_NOIO) flags &= ~(__GFP_IO | __GFP_FS); - else if (current->flags & PF_MEMALLOC_NOFS) + else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; -#ifdef CONFIG_CMA - if (current->flags & PF_MEMALLOC_NOCMA) - flags &= ~__GFP_MOVABLE; -#endif } return flags; } diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index ae3060f0b0c9..a98965007eef 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -88,6 +88,7 @@ struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); +int kernel_wait(pid_t pid, int *stat); extern void free_task(struct task_struct *tsk); diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 54bc20496392..962d9768945f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -10,13 +10,16 @@ * * Copyrights: * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli + * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH */ -#include <linux/spinlock.h> -#include <linux/preempt.h> -#include <linux/lockdep.h> #include <linux/compiler.h> #include <linux/kcsan-checks.h> +#include <linux/lockdep.h> +#include <linux/mutex.h> +#include <linux/preempt.h> +#include <linux/spinlock.h> + #include <asm/processor.h> /* @@ -48,6 +51,10 @@ * This mechanism can't be used if the protected data contains pointers, * as the writer can invalidate a pointer that a reader is following. * + * If the write serialization mechanism is one of the common kernel + * locking primitives, use a sequence counter with associated lock + * (seqcount_LOCKTYPE_t) instead. + * * If it's desired to automatically handle the sequence counter writer * serialization and non-preemptibility requirements, use a sequential * lock (seqlock_t) instead. @@ -72,17 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name, } #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SEQCOUNT_DEP_MAP_INIT(lockname) \ - .dep_map = { .name = #lockname } \ + +# define SEQCOUNT_DEP_MAP_INIT(lockname) \ + .dep_map = { .name = #lockname } /** * seqcount_init() - runtime initializer for seqcount_t * @s: Pointer to the seqcount_t instance */ -# define seqcount_init(s) \ - do { \ - static struct lock_class_key __key; \ - __seqcount_init((s), #s, &__key); \ +# define seqcount_init(s) \ + do { \ + static struct lock_class_key __key; \ + __seqcount_init((s), #s, &__key); \ } while (0) static inline void seqcount_lockdep_reader_access(const seqcount_t *s) @@ -108,9 +116,143 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) */ #define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) } +/* + * Sequence counters with associated locks (seqcount_LOCKTYPE_t) + * + * A sequence counter which associates the lock used for writer + * serialization at initialization time. This enables lockdep to validate + * that the write side critical section is properly serialized. 
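current_gfp_context() above now snapshots current->flags once and no longer handles PF_MEMALLOC_NOCMA. A sketch of the scoped-NOIO pattern it serves (the function name is illustrative; memalloc_noio_save()/restore() are the existing sched/mm.h helpers):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *alloc_on_reclaim_path(size_t len)
    {
            unsigned int noio_flags;
            void *buf;

            noio_flags = memalloc_noio_save();
            /*
             * Within this scope the allocator applies current_gfp_context(),
             * so this GFP_KERNEL is implicitly stripped of __GFP_IO/__GFP_FS.
             */
            buf = kmalloc(len, GFP_KERNEL);
            memalloc_noio_restore(noio_flags);
            return buf;
    }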
+ * + * For associated locks which do not implicitly disable preemption, + * preemption protection is enforced in the write side function. + * + * Lockdep is never used in any of the raw write variants. + * + * See Documentation/locking/seqlock.rst + */ + +#ifdef CONFIG_LOCKDEP +#define __SEQ_LOCK(expr) expr +#else +#define __SEQ_LOCK(expr) +#endif + +/** + * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated spinlock + * + * A plain sequence counter with external writer synchronization by a + * spinlock. The spinlock is associated to the sequence count in the + * static initializer or init function. This enables lockdep to validate + * that the write side critical section is properly serialized. + */ + +/** + * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t + * @s: Pointer to the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ + +/* + * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers + * @locktype: actual typename + * @lockname: name + * @preemptible: preemptibility of above locktype + * @lockmember: argument for lockdep_assert_held() + */ +#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \ +typedef struct seqcount_##lockname { \ + seqcount_t seqcount; \ + __SEQ_LOCK(locktype *lock); \ +} seqcount_##lockname##_t; \ + \ +static __always_inline void \ +seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \ +{ \ + seqcount_init(&s->seqcount); \ + __SEQ_LOCK(s->lock = lock); \ +} \ + \ +static __always_inline seqcount_t * \ +__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \ +{ \ + return &s->seqcount; \ +} \ + \ +static __always_inline bool \ +__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \ +{ \ + return preemptible; \ +} \ + \ +static __always_inline void \ +__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \ +{ \ + __SEQ_LOCK(lockdep_assert_held(lockmember)); \ +} + +/* + * __seqprop() for seqcount_t + */ + +static inline seqcount_t *__seqcount_ptr(seqcount_t *s) +{ + return s; +} + +static inline bool __seqcount_preemptible(seqcount_t *s) +{ + return false; +} + +static inline void __seqcount_assert(seqcount_t *s) +{ + lockdep_assert_preemption_disabled(); +} + +SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock) +SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock) +SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) + +/** + * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t + * @name: Name of the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ + +#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ + .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ + __SEQ_LOCK(.lock = (assoc_lock)) \ +} + +#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + + +#define __seqprop_case(s, lockname, prop) \ + seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s)) + +#define __seqprop(s, prop) _Generic(*(s), \ + seqcount_t: 
__seqcount_##prop((void *)(s)), \ + __seqprop_case((s), raw_spinlock, prop), \ + __seqprop_case((s), spinlock, prop), \ + __seqprop_case((s), rwlock, prop), \ + __seqprop_case((s), mutex, prop), \ + __seqprop_case((s), ww_mutex, prop)) + +#define __seqcount_ptr(s) __seqprop(s, ptr) +#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible) +#define __seqcount_assert_lock_held(s) __seqprop(s, assert) + /** * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is @@ -122,7 +264,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned __read_seqcount_begin(const seqcount_t *s) +#define __read_seqcount_begin(s) \ + __read_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned __read_seqcount_t_begin(const seqcount_t *s) { unsigned ret; @@ -138,32 +283,38 @@ repeat: /** * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) +#define raw_read_seqcount_begin(s) \ + raw_read_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) { - unsigned ret = __read_seqcount_begin(s); + unsigned ret = __read_seqcount_t_begin(s); smp_rmb(); return ret; } /** * read_seqcount_begin() - begin a seqcount_t read critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned read_seqcount_begin(const seqcount_t *s) +#define read_seqcount_begin(s) \ + read_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned read_seqcount_t_begin(const seqcount_t *s) { seqcount_lockdep_reader_access(s); - return raw_read_seqcount_begin(s); + return raw_read_seqcount_t_begin(s); } /** * raw_read_seqcount() - read the raw seqcount_t counter value - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * raw_read_seqcount opens a read critical section of the given * seqcount_t, without any lockdep checking, and without checking or @@ -172,7 +323,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount(const seqcount_t *s) +#define raw_read_seqcount(s) \ + raw_read_seqcount_t(__seqcount_ptr(s)) + +static inline unsigned raw_read_seqcount_t(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); @@ -183,7 +337,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) /** * raw_seqcount_begin() - begin a seqcount_t read critical section w/o * lockdep and w/o counter stabilization - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * raw_seqcount_begin opens a read critical section of the given * seqcount_t. 
Unlike read_seqcount_begin(), this function will not wait @@ -197,18 +351,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) * * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_seqcount_begin(const seqcount_t *s) +#define raw_seqcount_begin(s) \ + raw_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) { /* * If the counter is odd, let read_seqcount_retry() fail * by decrementing the counter. */ - return raw_read_seqcount(s) & ~1; + return raw_read_seqcount_t(s) & ~1; } /** * __read_seqcount_retry() - end a seqcount_t read section w/o barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @start: count, from read_seqcount_begin() * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() @@ -221,7 +378,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) * * Return: true if a read section retry is required, else false */ -static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) +#define __read_seqcount_retry(s, start) \ + __read_seqcount_t_retry(__seqcount_ptr(s), start) + +static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) { kcsan_atomic_next(0); return unlikely(READ_ONCE(s->sequence) != start); @@ -229,7 +389,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) /** * read_seqcount_retry() - end a seqcount_t read critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @start: count, from read_seqcount_begin() * * read_seqcount_retry closes the read critical section of given @@ -238,17 +398,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) * * Return: true if a read section retry is required, else false */ -static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) +#define read_seqcount_retry(s, start) \ + read_seqcount_t_retry(__seqcount_ptr(s), start) + +static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) { smp_rmb(); - return __read_seqcount_retry(s, start); + return __read_seqcount_t_retry(s, start); } /** * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants */ -static inline void raw_write_seqcount_begin(seqcount_t *s) +#define raw_write_seqcount_begin(s) \ +do { \ + if (__seqcount_lock_preemptible(s)) \ + preempt_disable(); \ + \ + raw_write_seqcount_t_begin(__seqcount_ptr(s)); \ +} while (0) + +static inline void raw_write_seqcount_t_begin(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -257,49 +428,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s) /** * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants */ -static inline void raw_write_seqcount_end(seqcount_t *s) +#define raw_write_seqcount_end(s) \ +do { \ + raw_write_seqcount_t_end(__seqcount_ptr(s)); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void raw_write_seqcount_t_end(seqcount_t *s) { smp_wmb(); s->sequence++; kcsan_nestable_atomic_end(); } -static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - raw_write_seqcount_begin(s); - seqcount_acquire(&s->dep_map, 
subclass, 0, _RET_IP_); -} - /** * write_seqcount_begin_nested() - start a seqcount_t write section with * custom lockdep nesting level - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * @subclass: lockdep nesting level * * See Documentation/locking/lockdep-design.rst */ -static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) +#define write_seqcount_begin_nested(s, subclass) \ +do { \ + __seqcount_assert_lock_held(s); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \ +} while (0) + +static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) { - lockdep_assert_preemption_disabled(); - __write_seqcount_begin_nested(s, subclass); -} - -/* - * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks. - * - * Use for internal seqlock.h code where it's known that preemption is - * already disabled. For example, seqlock_t write side functions. - */ -static inline void __write_seqcount_begin(seqcount_t *s) -{ - __write_seqcount_begin_nested(s, 0); + raw_write_seqcount_t_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); } /** * write_seqcount_begin() - start a seqcount_t write side critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * write_seqcount_begin opens a write side critical section of the given * seqcount_t. @@ -308,26 +480,44 @@ static inline void __write_seqcount_begin(seqcount_t *s) * non-preemptible. If readers can be invoked from hardirq or softirq * context, interrupts or bottom halves must be respectively disabled. */ -static inline void write_seqcount_begin(seqcount_t *s) +#define write_seqcount_begin(s) \ +do { \ + __seqcount_assert_lock_held(s); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin(__seqcount_ptr(s)); \ +} while (0) + +static inline void write_seqcount_t_begin(seqcount_t *s) { - write_seqcount_begin_nested(s, 0); + write_seqcount_t_begin_nested(s, 0); } /** * write_seqcount_end() - end a seqcount_t write side critical section - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * The write section must've been opened with write_seqcount_begin(). */ -static inline void write_seqcount_end(seqcount_t *s) +#define write_seqcount_end(s) \ +do { \ + write_seqcount_t_end(__seqcount_ptr(s)); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void write_seqcount_t_end(seqcount_t *s) { seqcount_release(&s->dep_map, _RET_IP_); - raw_write_seqcount_end(s); + raw_write_seqcount_t_end(s); } /** * raw_write_seqcount_barrier() - do a seqcount_t write barrier - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * This can be used to provide an ordering guarantee instead of the usual * consistency guarantee. 
It is one wmb cheaper, because it can collapse @@ -366,7 +556,10 @@ static inline void write_seqcount_end(seqcount_t *s) * WRITE_ONCE(X, false); * } */ -static inline void raw_write_seqcount_barrier(seqcount_t *s) +#define raw_write_seqcount_barrier(s) \ + raw_write_seqcount_t_barrier(__seqcount_ptr(s)) + +static inline void raw_write_seqcount_t_barrier(seqcount_t *s) { kcsan_nestable_atomic_begin(); s->sequence++; @@ -378,12 +571,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) /** * write_seqcount_invalidate() - invalidate in-progress seqcount_t read * side operations - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * After write_seqcount_invalidate, no seqcount_t read side operations * will complete successfully and see data older than this. */ -static inline void write_seqcount_invalidate(seqcount_t *s) +#define write_seqcount_invalidate(s) \ + write_seqcount_t_invalidate(__seqcount_ptr(s)) + +static inline void write_seqcount_t_invalidate(seqcount_t *s) { smp_wmb(); kcsan_nestable_atomic_begin(); @@ -393,7 +589,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s) /** * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * Use seqcount_t latching to switch between two storage places protected * by a sequence counter. Doing so allows having interruptible, preemptible, @@ -406,7 +602,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s) * picking which data copy to read. The full counter value must then be * checked with read_seqcount_retry(). */ -static inline int raw_read_seqcount_latch(seqcount_t *s) +#define raw_read_seqcount_latch(s) \ + raw_read_seqcount_t_latch(__seqcount_ptr(s)) + +static inline int raw_read_seqcount_t_latch(seqcount_t *s) { /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ int seq = READ_ONCE(s->sequence); /* ^^^ */ @@ -415,7 +614,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) /** * raw_write_seqcount_latch() - redirect readers to even/odd copy - * @s: Pointer to seqcount_t + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -494,7 +693,10 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * When data is a dynamic data structure; one should use regular RCU * patterns to manage the lifetimes of the objects within. 
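Putting the associated-lock API above together: a minimal sketch of a writer/reader pair built on a seqcount_spinlock_t (the stats structure and function names are illustrative, not part of this patch):

    #include <linux/seqlock.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct stats {
            spinlock_t lock;
            seqcount_spinlock_t seq;
            u64 packets, bytes;
    };

    static void stats_init(struct stats *s)
    {
            spin_lock_init(&s->lock);
            seqcount_spinlock_init(&s->seq, &s->lock);
    }

    static void stats_update(struct stats *s, u64 len)
    {
            spin_lock(&s->lock);
            write_seqcount_begin(&s->seq);  /* lockdep checks s->lock is held */
            s->packets++;
            s->bytes += len;
            write_seqcount_end(&s->seq);
            spin_unlock(&s->lock);
    }

    static void stats_read(struct stats *s, u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = read_seqcount_begin(&s->seq);
                    *packets = s->packets;
                    *bytes = s->bytes;
            } while (read_seqcount_retry(&s->seq, start));
    }

Because spinlocks disable preemption implicitly, the write side adds no preempt_disable() here; with a mutex- or ww_mutex-associated seqcount the write_seqcount_begin()/end() macros above supply it.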
*/ -static inline void raw_write_seqcount_latch(seqcount_t *s) +#define raw_write_seqcount_latch(s) \ + raw_write_seqcount_t_latch(__seqcount_ptr(s)) + +static inline void raw_write_seqcount_t_latch(seqcount_t *s) { smp_wmb(); /* prior stores before incrementing "sequence" */ s->sequence++; @@ -516,20 +718,20 @@ typedef struct { spinlock_t lock; } seqlock_t; -#define __SEQLOCK_UNLOCKED(lockname) \ - { \ - .seqcount = SEQCNT_ZERO(lockname), \ - .lock = __SPIN_LOCK_UNLOCKED(lockname) \ +#define __SEQLOCK_UNLOCKED(lockname) \ + { \ + .seqcount = SEQCNT_ZERO(lockname), \ + .lock = __SPIN_LOCK_UNLOCKED(lockname) \ } /** * seqlock_init() - dynamic initializer for seqlock_t * @sl: Pointer to the seqlock_t instance */ -#define seqlock_init(sl) \ - do { \ - seqcount_init(&(sl)->seqcount); \ - spin_lock_init(&(sl)->lock); \ +#define seqlock_init(sl) \ + do { \ + seqcount_init(&(sl)->seqcount); \ + spin_lock_init(&(sl)->lock); \ } while (0) /** @@ -592,7 +794,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -604,7 +806,7 @@ static inline void write_seqlock(seqlock_t *sl) */ static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock(&sl->lock); } @@ -618,7 +820,7 @@ static inline void write_sequnlock(seqlock_t *sl) static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -631,7 +833,7 @@ static inline void write_seqlock_bh(seqlock_t *sl) */ static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } @@ -645,7 +847,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl) static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } /** @@ -657,7 +859,7 @@ static inline void write_seqlock_irq(seqlock_t *sl) */ static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } @@ -666,7 +868,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - __write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); return flags; } @@ -695,13 +897,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } /** * read_seqlock_excl() - begin a seqlock_t locking reader section - * @sl: Pointer to seqlock_t + * @sl: Pointer to seqlock_t * * read_seqlock_excl opens a seqlock_t locking reader critical section. 
A * locking reader exclusively locks out *both* other writers *and* other diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 6545f8cfc8fa..2b70f736b091 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -155,6 +155,8 @@ extern int early_serial_setup(struct uart_port *port); extern int early_serial8250_setup(struct earlycon_device *device, const char *options); +extern void serial8250_update_uartclk(struct uart_port *port, + unsigned int uartclk); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); extern void serial8250_do_set_ldisc(struct uart_port *port, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 791f4844efeb..01fc4d9c9c54 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -10,7 +10,6 @@ #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/console.h> -#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/circ_buf.h> #include <linux/spinlock.h> @@ -30,6 +29,7 @@ struct uart_port; struct serial_struct; struct device; +struct gpio_desc; /* * This structure describes all the operations that can be done on the diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 7a35a6901221..a5a5d1d4d7b1 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -36,6 +36,9 @@ struct shmem_sb_info { unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ + bool full_inums; /* If i_ino should be uint or ino_t */ + ino_t next_ino; /* The next per-sb inode number to use */ + ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */ struct mempolicy *mpol; /* default memory policy for mappings */ spinlock_t shrinklist_lock; /* Protects shrinklist */ struct list_head shrinklist; /* List of shrinkable inodes */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3ad65d4ce085..46881d902124 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -238,6 +238,7 @@ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +struct ahash_request; struct net_device; struct scatterlist; struct pipe_inode_info; diff --git a/include/linux/slab.h b/include/linux/slab.h index 6d454886bcaf..24df2393ec03 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,9 +155,6 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); -void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); -void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); - /* * Please use this macro to create slab caches. Simply specify the * name of the structure and maybe some flags that are listed above. 
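The slab.h hunk just below renames kzfree() to kfree_sensitive(), making the zero-before-free semantics explicit. A minimal sketch of the intended use (the helper function is illustrative):

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static int use_key(const u8 *key, size_t len)
    {
            u8 *copy = kmemdup(key, len, GFP_KERNEL);

            if (!copy)
                    return -ENOMEM;

            /* ... derive session material from copy ... */

            /*
             * Zeroes the buffer before freeing it so the key material does
             * not linger in freed slab memory; kzfree() remains available
             * as a backward-compatibility alias per the hunk below.
             */
            kfree_sensitive(copy);
            return 0;
    }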
@@ -186,10 +183,12 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); */ void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); -void kzfree(const void *); +void kfree_sensitive(const void *); size_t __ksize(const void *); size_t ksize(const void *); +#define kzfree(x) kfree_sensitive(x) /* For backward compatibility */ + #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR void __check_heap_object(const void *ptr, unsigned long n, struct page *page, bool to_user); @@ -578,8 +577,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) return __kmalloc_node(size, flags, node); } -int memcg_update_all_caches(int num_memcgs); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index abc7de77b988..9eb430c163c2 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -72,9 +72,6 @@ struct kmem_cache { int obj_offset; #endif /* CONFIG_DEBUG_SLAB */ -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; -#endif #ifdef CONFIG_KASAN struct kasan_cache kasan_info; #endif @@ -114,4 +111,10 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, return reciprocal_divide(offset, cache->reciprocal_buffer_size); } +static inline int objs_per_slab_page(const struct kmem_cache *cache, + const struct page *page) +{ + return cache->num; +} + #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d2153789bd9f..1be0ed5befa1 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -8,6 +8,7 @@ * (C) 2007 SGI, Christoph Lameter */ #include <linux/kobject.h> +#include <linux/reciprocal_div.h> enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -86,6 +87,7 @@ struct kmem_cache { unsigned long min_partial; unsigned int size; /* The size of an object including metadata */ unsigned int object_size;/* The size of an object without metadata */ + struct reciprocal_value reciprocal_size; unsigned int offset; /* Free pointer offset */ #ifdef CONFIG_SLUB_CPU_PARTIAL /* Number of per cpu partial objects to keep around */ @@ -106,17 +108,7 @@ struct kmem_cache { struct list_head list; /* List of slab caches */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ - struct work_struct kobj_remove_work; #endif -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; - /* For propagation, maximum size of a stored attr */ - unsigned int max_attr_size; -#ifdef CONFIG_SYSFS - struct kset *memcg_kset; -#endif -#endif - #ifdef CONFIG_SLAB_FREELIST_HARDENED unsigned long random; #endif @@ -182,4 +174,23 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, return result; } +/* Determine object index from a given position */ +static inline unsigned int __obj_to_index(const struct kmem_cache *cache, + void *addr, void *obj) +{ + return reciprocal_divide(kasan_reset_tag(obj) - addr, + cache->reciprocal_size); +} + +static inline unsigned int obj_to_index(const struct kmem_cache *cache, + const struct page *page, void *obj) +{ + return __obj_to_index(cache, page_address(page), obj); +} + +static inline int objs_per_slab_page(const struct kmem_cache *cache, + const struct page *page) +{ + return page->objects; +} #endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h index 7ac115432fa1..5a472eca5ee4 100644 --- a/include/linux/soc/ti/k3-ringacc.h +++ 
b/include/linux/soc/ti/k3-ringacc.h @@ -2,7 +2,7 @@ /* * K3 Ring Accelerator (RA) subsystem interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __SOC_TI_K3_RINGACC_API_H_ diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h index 9745df6ed9d3..c75ef99c99ca 100644 --- a/include/linux/soc/ti/knav_qmss.h +++ b/include/linux/soc/ti/knav_qmss.h @@ -1,7 +1,7 @@ /* * Keystone Navigator Queue Management Sub-System header * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * Author: Sandeep Nair <sandeep_n@ti.com> * Cyril Chemparathy <cyril@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h index eac8e0c6fe11..1f6e76d423cf 100644 --- a/include/linux/soc/ti/ti-msgmgr.h +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -1,7 +1,7 @@ /* * Texas Instruments' Message Manager * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/socket.h b/include/linux/socket.h index 04d2bc97f497..e9cb30d8cbfb 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -10,6 +10,7 @@ #include <linux/compiler.h> /* __user */ #include <uapi/linux/socket.h> +struct file; struct pid; struct cred; struct socket; diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 320c672d84de..4af31bbc8802 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -124,4 +124,78 @@ rpcrdma_decode_buffer_size(u8 val) return ((unsigned int)val + 1) << 10; } +/** + * xdr_encode_rdma_segment - Encode contents of an RDMA segment + * @p: Pointer into a send buffer + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded RDMA segment + */ +static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle, + u32 length, u64 offset) +{ + *p++ = cpu_to_be32(handle); + *p++ = cpu_to_be32(length); + return xdr_encode_hyper(p, offset); +} + +/** + * xdr_encode_read_segment - Encode contents of a Read segment + * @p: Pointer into a send buffer + * @position: The position to encode + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded Read segment + */ +static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position, + u32 handle, u32 length, + u64 offset) +{ + *p++ = cpu_to_be32(position); + return xdr_encode_rdma_segment(p, handle, length, offset); +} + +/** + * xdr_decode_rdma_segment - Decode contents of an RDMA segment + * @p: Pointer to the undecoded RDMA segment + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the RDMA segment + */ +static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle, + u32 *length, u64 *offset) +{ + *handle = be32_to_cpup(p++); + *length = be32_to_cpup(p++); + return xdr_decode_hyper(p, 
offset); +} + +/** + * xdr_decode_read_segment - Decode contents of a Read segment + * @p: Pointer to the undecoded Read segment + * @position: Upon return, the segment's position + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the Read segment + */ +static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position, + u32 *handle, u32 *length, + u64 *offset) +{ + *position = be32_to_cpup(p++); + return xdr_decode_rdma_segment(p, handle, length, offset); +} + #endif /* _LINUX_SUNRPC_RPC_RDMA_H */ diff --git a/include/linux/sunrpc/rpc_rdma_cid.h b/include/linux/sunrpc/rpc_rdma_cid.h new file mode 100644 index 000000000000..be24ab2baa6a --- /dev/null +++ b/include/linux/sunrpc/rpc_rdma_cid.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * * Copyright (c) 2020, Oracle and/or its affiliates. + */ + +#ifndef RPC_RDMA_CID_H +#define RPC_RDMA_CID_H + +/* + * The rpc_rdma_cid struct records completion ID information. A + * completion ID matches an incoming Send or Receive completion + * to a Completion Queue and to a previous ib_post_*(). The ID + * can then be displayed in an error message or recorded in a + * trace record. + * + * This struct is shared between the server and client RPC/RDMA + * transport implementations. + */ +struct rpc_rdma_cid { + u32 ci_queue_id; + int ci_completion_id; +}; + +#endif /* RPC_RDMA_CID_H */ diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 7ed82625dc0b..9dc3a3b88391 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -46,6 +46,7 @@ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/rpc_rdma.h> +#include <linux/sunrpc/rpc_rdma_cid.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> @@ -109,6 +110,8 @@ struct svcxprt_rdma { struct work_struct sc_work; struct llist_head sc_recv_ctxts; + + atomic_t sc_completion_ids; }; /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 @@ -129,6 +132,7 @@ struct svc_rdma_recv_ctxt { struct list_head rc_list; struct ib_recv_wr rc_recv_wr; struct ib_cqe rc_cqe; + struct rpc_rdma_cid rc_cid; struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_buf rc_arg; @@ -147,6 +151,8 @@ struct svc_rdma_recv_ctxt { struct svc_rdma_send_ctxt { struct list_head sc_list; + struct rpc_rdma_cid sc_cid; + struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; struct xdr_buf sc_hdrbuf; @@ -190,20 +196,21 @@ extern struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma); extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); -extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); +extern int svc_rdma_send(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, struct xdr_buf *xdr); +extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *sctxt, + struct svc_rdma_recv_ctxt *rctxt, + int status); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); /* svc_rdma_transport.c */ -extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); -extern void svc_sq_reap(struct svcxprt_rdma *); -extern void svc_rq_reap(struct svcxprt_rdma 
*); - extern struct svc_xprt_class svc_rdma_class; #ifdef CONFIG_SUNRPC_BACKCHANNEL extern struct svc_xprt_class svc_rdma_bc_class; diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 22c207b2425f..5a6a81b7cd9f 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -475,6 +475,32 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr, } /** + * xdr_item_is_absent - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is absent + * %false if the following XDR item is present + */ +static inline bool xdr_item_is_absent(const __be32 *p) +{ + return *p == xdr_zero; +} + +/** + * xdr_item_is_present - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is present + * %false if the following XDR item is absent + */ +static inline bool xdr_item_is_present(const __be32 *p) +{ + return *p != xdr_zero; +} + +/** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store integer diff --git a/include/linux/suspend.h b/include/linux/suspend.h index b960098acfb0..cb9afad82a90 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -453,6 +453,8 @@ extern bool hibernation_available(void); asmlinkage int swsusp_save(void); extern struct pbe *restore_pblist; int pfn_is_nosave(unsigned long pfn); + +int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} @@ -464,6 +466,10 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op static inline int hibernate(void) { return -ENOSYS; } static inline bool system_entering_hibernation(void) { return false; } static inline bool hibernation_available(void) { return false; } + +static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) { + return -ENOTSUPP; +} #endif /* CONFIG_HIBERNATION */ #ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV diff --git a/include/linux/swap.h b/include/linux/swap.h index 5b3216ba39a9..661046994db4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -328,7 +328,6 @@ void workingset_update_node(struct xa_node *node); /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; extern unsigned long nr_free_buffer_pages(void); -extern unsigned long nr_free_pagecache_pages(void); /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) @@ -353,7 +352,7 @@ extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); -extern void lru_cache_add_active_or_unevictable(struct page *page, +extern void lru_cache_add_inactive_or_unevictable(struct page *page, struct vm_area_struct *vma); /* linux/mm/vmscan.c */ @@ -372,7 +371,6 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); -extern unsigned long vm_total_pages; extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA @@ -416,9 +414,14 @@ extern struct address_space *swapper_spaces[]; extern unsigned long 
total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); -extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); -extern void __delete_from_swap_cache(struct page *, swp_entry_t entry); +extern void *get_shadow_from_swap_cache(swp_entry_t entry); +extern int add_to_swap_cache(struct page *page, swp_entry_t entry, + gfp_t gfp, void **shadowp); +extern void __delete_from_swap_cache(struct page *page, + swp_entry_t entry, void *shadow); extern void delete_from_swap_cache(struct page *); +extern void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); extern struct page *lookup_swap_cache(swp_entry_t entry, @@ -571,14 +574,19 @@ static inline int add_to_swap(struct page *page) return 0; } +static inline void *get_shadow_from_swap_cache(swp_entry_t entry) +{ + return NULL; +} + static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, - gfp_t gfp_mask) + gfp_t gfp_mask, void **shadowp) { return -1; } static inline void __delete_from_swap_cache(struct page *page, - swp_entry_t entry) + swp_entry_t entry, void *shadow) { } @@ -586,6 +594,11 @@ static inline void delete_from_swap_cache(struct page *page) { } +static inline void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end) +{ +} + static inline int page_swapcount(struct page *page) { return 0; diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index e07a83f9f093..dc2b827c81e5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -263,7 +263,7 @@ static inline void addr_limit_user_check(void) return; #endif - if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), + if (CHECK_DATA_CORRUPTION(uaccess_kernel(), "Invalid address limit on user-mode return")) force_sig(SIGKILL); @@ -1237,18 +1237,8 @@ asmlinkage long sys_ni_syscall(void); * Instead, use one of the functions which work equivalently, such as * the ksys_xyzyyz() functions prototyped below. */ - -int ksys_umount(char __user *name, int flags); -int ksys_dup(unsigned int fildes); -int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); -int ksys_chdir(const char __user *filename); -int ksys_fchmod(unsigned int fd, umode_t mode); int ksys_fchown(unsigned int fd, uid_t user, gid_t group); -int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, - unsigned int count); -int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); -off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); void ksys_sync(void); int ksys_unshare(unsigned long unshare_flags); @@ -1282,68 +1272,6 @@ int compat_ksys_ipc(u32 call, int first, int second, * The following kernel syscall equivalents are just wrappers to fs-internal * functions. Therefore, provide stubs to be inlined at the callsites. 
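The swap.h changes above thread a workingset shadow entry through the swap cache. A sketch of how an mm-internal caller might consume it, loosely modeled on the mm/swap_state.c flow (simplified and illustrative, not a drop-in):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/swap.h>

    static int cache_swap_page(struct page *page, swp_entry_t entry)
    {
            void *shadow = NULL;
            int err;

            err = add_to_swap_cache(page, entry, GFP_KERNEL, &shadow);
            if (err)
                    return err;

            /* a preserved shadow entry feeds refault detection */
            if (shadow)
                    workingset_refault(page, shadow);
            return 0;
    }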
*/ -extern long do_unlinkat(int dfd, struct filename *name); - -static inline long ksys_unlink(const char __user *pathname) -{ - return do_unlinkat(AT_FDCWD, getname(pathname)); -} - -extern long do_rmdir(int dfd, const char __user *pathname); - -static inline long ksys_rmdir(const char __user *pathname) -{ - return do_rmdir(AT_FDCWD, pathname); -} - -extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); - -static inline long ksys_mkdir(const char __user *pathname, umode_t mode) -{ - return do_mkdirat(AT_FDCWD, pathname, mode); -} - -extern long do_symlinkat(const char __user *oldname, int newdfd, - const char __user *newname); - -static inline long ksys_symlink(const char __user *oldname, - const char __user *newname) -{ - return do_symlinkat(oldname, AT_FDCWD, newname); -} - -extern long do_mknodat(int dfd, const char __user *filename, umode_t mode, - unsigned int dev); - -static inline long ksys_mknod(const char __user *filename, umode_t mode, - unsigned int dev) -{ - return do_mknodat(AT_FDCWD, filename, mode, dev); -} - -extern int do_linkat(int olddfd, const char __user *oldname, int newdfd, - const char __user *newname, int flags); - -static inline long ksys_link(const char __user *oldname, - const char __user *newname) -{ - return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); -} - -extern int do_fchmodat(int dfd, const char __user *filename, umode_t mode); - -static inline int ksys_chmod(const char __user *filename, umode_t mode) -{ - return do_fchmodat(AT_FDCWD, filename, mode); -} - -long do_faccessat(int dfd, const char __user *filename, int mode, int flags); - -static inline long ksys_access(const char __user *filename, int mode) -{ - return do_faccessat(AT_FDCWD, filename, mode, 0); -} - extern int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); @@ -1379,17 +1307,6 @@ static inline int ksys_close(unsigned int fd) return __close_fd(current->files, fd); } -extern long do_sys_open(int dfd, const char __user *filename, int flags, - umode_t mode); - -static inline long ksys_open(const char __user *filename, int flags, - umode_t mode) -{ - if (force_o_largefile()) - flags |= O_LARGEFILE; - return do_sys_open(AT_FDCWD, filename, flags, mode); -} - extern long do_sys_truncate(const char __user *pathname, loff_t length); static inline long ksys_truncate(const char __user *pathname, loff_t length) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 216185bb3014..42ef807e5d84 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -37,18 +37,6 @@ struct thermal_cooling_device; struct thermal_instance; struct thermal_attr; -enum thermal_device_mode { - THERMAL_DEVICE_DISABLED = 0, - THERMAL_DEVICE_ENABLED, -}; - -enum thermal_trip_type { - THERMAL_TRIP_ACTIVE = 0, - THERMAL_TRIP_PASSIVE, - THERMAL_TRIP_HOT, - THERMAL_TRIP_CRITICAL, -}; - enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ THERMAL_TREND_RAISING, /* temperature is raising */ @@ -76,9 +64,7 @@ struct thermal_zone_device_ops { struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); - int (*get_mode) (struct thermal_zone_device *, - enum thermal_device_mode *); - int (*set_mode) (struct thermal_zone_device *, + int (*change_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); @@ -128,6 +114,7 @@ struct thermal_cooling_device { * 
@trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis + * @mode: current mode of this thermal zone * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @trips_disabled: bitmap for disabled trips @@ -170,6 +157,7 @@ struct thermal_zone_device { struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; void *devdata; int trips; unsigned long trips_disabled; /* bitmap for disabled trips */ @@ -303,11 +291,6 @@ struct thermal_zone_params { int offset; }; -struct thermal_genl_event { - u32 orig; - enum events event; -}; - /** * struct thermal_zone_of_device_ops - callbacks for handling DT based zones * @@ -416,6 +399,8 @@ int thermal_zone_get_offset(struct thermal_zone_device *tz); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); +int thermal_zone_device_enable(struct thermal_zone_device *tz); +int thermal_zone_device_disable(struct thermal_zone_device *tz); #else static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, @@ -463,6 +448,12 @@ static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) static inline void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { } + +static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) +{ return -ENODEV; } + +static inline int thermal_zone_device_disable(struct thermal_zone_device *tz) +{ return -ENODEV; } #endif /* CONFIG_THERMAL */ #endif /* __THERMAL_H__ */ diff --git a/include/linux/time.h b/include/linux/time.h index 4c325bf44ce0..b142cb5f5a53 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -3,7 +3,6 @@ #define _LINUX_TIME_H # include <linux/cache.h> -# include <linux/seqlock.h> # include <linux/math64.h> # include <linux/time64.h> diff --git a/include/linux/trace.h b/include/linux/trace.h index 7fd86d3c691f..36d255d66f88 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -29,6 +29,7 @@ struct trace_array; void trace_printk_init_buffers(void); int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...); +int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct trace_array *tr); struct trace_array *trace_array_get_by_name(const char *name); int trace_array_destroy(struct trace_array *tr); diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a1fecf311621..598fec9f9dbf 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -116,8 +116,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __TRACEPOINT_ENTRY(name) \ static tracepoint_ptr_t __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ - &__tracepoint_##name + __section(__tracepoints_ptrs) = &__tracepoint_##name #endif #endif /* _LINUX_TRACEPOINT_H */ @@ -280,9 +279,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) */ #define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ - __attribute__((section("__tracepoints_strings"))) = #name; \ - struct tracepoint __tracepoint_##name \ - __attribute__((section("__tracepoints"), used)) = \ + 
__section(__tracepoints_strings) = #name; \ + struct tracepoint __tracepoint_##name __used \ + __section(__tracepoints) = \ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ __TRACEPOINT_ENTRY(name); @@ -361,7 +360,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"))) +#define __tracepoint_string __used __section(__tracepoint_str) #else /* * tracepoint_string() is used to save the string address for userspace diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 0a76ddc07d59..94b285411659 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -6,11 +6,27 @@ #include <linux/sched.h> #include <linux/thread_info.h> -#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) - #include <asm/uaccess.h> /* + * Force the uaccess routines to be wired up for actual userspace access, + * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone + * using force_uaccess_end below. + */ +static inline mm_segment_t force_uaccess_begin(void) +{ + mm_segment_t fs = get_fs(); + + set_fs(USER_DS); + return fs; +} + +static inline void force_uaccess_end(mm_segment_t oldfs) +{ + set_fs(oldfs); +} + +/* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and * __copy_{to,from}_user{,_inatomic}(). diff --git a/include/linux/uio.h b/include/linux/uio.h index 9576fd8158d7..3835a8a8e9ea 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -7,7 +7,6 @@ #include <linux/kernel.h> #include <linux/thread_info.h> -#include <crypto/hash.h> #include <uapi/linux/uio.h> struct page; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 239db794357c..eae0bfd87d91 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -28,17 +28,28 @@ struct vdpa_notification_area { }; /** + * vDPA vq_state definition + * @avail_index: available index + */ +struct vdpa_vq_state { + u16 avail_index; +}; + +/** * vDPA device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. * @index: device index + * @features_valid: were features initialized? for legacy guests */ struct vdpa_device { struct device dev; struct device *dma_dev; const struct vdpa_config_ops *config; unsigned int index; + bool features_valid; + int nvqs; }; /** @@ -77,16 +88,22 @@ struct vdpa_device { * @set_vq_state: Set the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * @state: virtqueue state (last_avail_idx) + * @state: pointer to set virtqueue state (last_avail_idx) * Returns integer: success (0) or error (< 0) * @get_vq_state: Get the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * Returns virtqueue state (last_avail_idx) + * @state: pointer to returned state (last_avail_idx) * @get_vq_notification: Get the notification area for a virtqueue * @vdev: vdpa device * @idx: virtqueue index * Returns the notification area + * @get_vq_irq: Get the irq number of a virtqueue (optional, + * but must be implemented if vq irq offloading is required) + * @vdev: vdpa device + * @idx: virtqueue index + * Returns int: irq number of a virtqueue, + * negative number if no irq assigned. 
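force_uaccess_begin()/force_uaccess_end() from the uaccess.h hunk above wrap the legacy set_fs() dance behind a named pair. A minimal sketch of the pattern (the function name is illustrative):

    #include <linux/types.h>
    #include <linux/uaccess.h>

    static int fetch_user_word(u32 __user *ptr, u32 *val)
    {
            mm_segment_t old_fs;
            int ret;

            /* pin USER_DS in case a stale set_fs(KERNEL_DS) is in effect */
            old_fs = force_uaccess_begin();
            ret = get_user(*val, ptr);
            force_uaccess_end(old_fs);
            return ret;
    }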
* @get_vq_align: Get the virtqueue align requirement * for the device * @vdev: vdpa device @@ -174,10 +191,14 @@ struct vdpa_config_ops { struct vdpa_callback *cb); void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready); bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); - int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); - u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, + const struct vdpa_vq_state *state); + int (*get_vq_state)(struct vdpa_device *vdev, u16 idx, + struct vdpa_vq_state *state); struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); + /* vq irq is not expected to be changed once DRIVER_OK is set */ + int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx); /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); @@ -208,11 +229,12 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, + int nvqs, size_t size); -#define vdpa_alloc_device(dev_struct, member, parent, config) \ +#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \ container_of(__vdpa_alloc_device( \ - parent, config, \ + parent, config, nvqs, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ dev_struct, member))), \ @@ -266,4 +288,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) { return vdev->dma_dev; } + +static inline void vdpa_reset(struct vdpa_device *vdev) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = false; + ops->set_status(vdev, 0); +} + +static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = true; + return ops->set_features(vdev, features); +} + + +static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, + void *buf, unsigned int len) +{ + const struct vdpa_config_ops *ops = vdev->config; + + /* + * Config accesses aren't supposed to trigger before features are set. + * If it does happen we assume a legacy guest. 
+ */ + if (!vdev->features_valid) + vdpa_set_features(vdev, 0); + ops->get_config(vdev, offset, buf, len); +} + #endif /* _LINUX_VDPA_H */ diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 553b34c8b5f7..977caf96c8d2 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -110,12 +110,6 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev, } #if defined(CONFIG_VGA_ARB) -extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); -#else -static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } -#endif - -#if defined(CONFIG_VGA_ARB) extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); #else #define vga_put(pdev, rsrc) diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 16c0ed6c50a7..219037f4c08d 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -57,6 +57,7 @@ #define __LINUX_VIDEODEV2_H #include <linux/time.h> /* need struct timeval */ +#include <linux/kernel.h> #include <uapi/linux/videodev2.h> #endif /* __LINUX_VIDEODEV2_H */ diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h index 5d2d3124ca3d..ea722479510c 100644 --- a/include/linux/virtio_caif.h +++ b/include/linux/virtio_caif.h @@ -11,9 +11,9 @@ #include <linux/types.h> struct virtio_caif_transf_config { - u16 headroom; - u16 tailroom; - u32 mtu; + __virtio16 headroom; + __virtio16 tailroom; + __virtio32 mtu; u8 reserved[4]; }; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bb4cc4910750..8fe857e27ef3 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -6,6 +6,7 @@ #include <linux/bug.h> #include <linux/virtio.h> #include <linux/virtio_byteorder.h> +#include <linux/compiler_types.h> #include <uapi/linux/virtio_config.h> struct irq_affinity; @@ -162,16 +163,16 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, } /** - * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * virtio_has_dma_quirk - determine whether this device has the DMA quirk * @vdev: the device */ -static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) +static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev) { /* * Note the reverse polarity of the quirk feature (compared to most * other features), this is for compatibility with legacy systems. */ - return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM); } static inline @@ -287,70 +288,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); } +#define virtio_to_cpu(vdev, x) \ + _Generic((x), \ + __u8: (x), \ + __virtio16: virtio16_to_cpu((vdev), (x)), \ + __virtio32: virtio32_to_cpu((vdev), (x)), \ + __virtio64: virtio64_to_cpu((vdev), (x)) \ + ) + +#define cpu_to_virtio(vdev, x, m) \ + _Generic((m), \ + __u8: (x), \ + __virtio16: cpu_to_virtio16((vdev), (x)), \ + __virtio32: cpu_to_virtio32((vdev), (x)), \ + __virtio64: cpu_to_virtio64((vdev), (x)) \ + ) + +#define __virtio_native_type(structname, member) \ + typeof(virtio_to_cpu(NULL, ((structname*)0)->member)) + /* Config space accessors. 
*/ #define virtio_cread(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ - (*ptr) = 1; \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - *(ptr) = virtio_cread8(vdev, \ - offsetof(structname, member)); \ - break; \ case 2: \ - *(ptr) = virtio_cread16(vdev, \ - offsetof(structname, member)); \ - break; \ case 4: \ - *(ptr) = virtio_cread32(vdev, \ - offsetof(structname, member)); \ - break; \ - case 8: \ - *(ptr) = virtio_cread64(vdev, \ - offsetof(structname, member)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \ } while(0) /* Config space accessors. */ #define virtio_cwrite(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ + } while(0) + +/* + * Nothing virtio-specific about these, but let's worry about generalizing + * these later. + */ +#define virtio_le_to_cpu(x) \ + _Generic((x), \ + __u8: (u8)(x), \ + __le16: (u16)le16_to_cpu(x), \ + __le32: (u32)le32_to_cpu(x), \ + __le64: (u64)le64_to_cpu(x) \ + ) + +#define virtio_cpu_to_le(x, m) \ + _Generic((m), \ + __u8: (x), \ + __le16: cpu_to_le16(x), \ + __le32: cpu_to_le32(x), \ + __le64: cpu_to_le64(x) \ + ) + +/* LE (e.g. modern) Config space accessors. 
*/ +#define virtio_cread_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ - BUG_ON((*ptr) == 1); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - virtio_cwrite8(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ case 2: \ - virtio_cwrite16(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ case 4: \ - virtio_cwrite32(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 8: \ - virtio_cwrite64(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_le_to_cpu(virtio_cread_v); \ + } while(0) + +#define virtio_cwrite_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ } while(0) + /* Read @count fields, @bytes each. */ static inline void __virtio_cread_many(struct virtio_device *vdev, unsigned int offset, @@ -399,53 +463,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev, static inline u16 virtio_cread16(struct virtio_device *vdev, unsigned int offset) { - u16 ret; + __virtio16 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio16_to_cpu(vdev, (__force __virtio16)ret); + return virtio16_to_cpu(vdev, ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { + __virtio16 v; + might_sleep(); - val = (__force u16)cpu_to_virtio16(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio16(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u32 virtio_cread32(struct virtio_device *vdev, unsigned int offset) { - u32 ret; + __virtio32 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio32_to_cpu(vdev, (__force __virtio32)ret); + return virtio32_to_cpu(vdev, ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { + __virtio32 v; + might_sleep(); - val = (__force u32)cpu_to_virtio32(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio32(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u64 virtio_cread64(struct virtio_device *vdev, unsigned int offset) { - u64 ret; + __virtio64 ret; + __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); - return virtio64_to_cpu(vdev, (__force __virtio64)ret); + return virtio64_to_cpu(vdev, ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { + __virtio64 v; + might_sleep(); - val = (__force u64)cpu_to_virtio64(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = 
cpu_to_virtio64(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } /* Conditional config space accessors. */ @@ -459,4 +530,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) +/* Conditional config space accessors. */ +#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \ + ({ \ + int _r = 0; \ + if (!virtio_has_feature(vdev, fbit)) \ + _r = -ENOENT; \ + else \ + virtio_cread_le((vdev), structname, member, ptr); \ + _r; \ + }) #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 3dc70adfe5f5..b485b13fa50b 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -46,16 +46,15 @@ static inline void virtio_wmb(bool weak_barriers) dma_wmb(); } -static inline void virtio_store_mb(bool weak_barriers, - __virtio16 *p, __virtio16 v) -{ - if (weak_barriers) { - virt_store_mb(*p, v); - } else { - WRITE_ONCE(*p, v); - mb(); - } -} +#define virtio_store_mb(weak_barriers, p, v) \ +do { \ + if (weak_barriers) { \ + virt_store_mb(*p, v); \ + } else { \ + WRITE_ONCE(*p, v); \ + mb(); \ + } \ +} while (0) struct virtio_device; struct virtqueue; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 24fc7c3ae7d6..2e6ca53b9bbd 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -56,6 +56,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #endif #ifdef CONFIG_MIGRATION PGMIGRATE_SUCCESS, PGMIGRATE_FAIL, + THP_MIGRATION_SUCCESS, + THP_MIGRATION_FAIL, + THP_MIGRATION_SPLIT, #endif #ifdef CONFIG_COMPACTION COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index aa961088c551..91220ace31da 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -8,6 +8,7 @@ #include <linux/vm_event_item.h> #include <linux/atomic.h> #include <linux/static_key.h> +#include <linux/mmdebug.h> extern int sysctl_stat_interval; @@ -192,7 +193,8 @@ static inline unsigned long global_zone_page_state(enum zone_stat_item item) return x; } -static inline unsigned long global_node_page_state(enum node_stat_item item) +static inline +unsigned long global_node_page_state_pages(enum node_stat_item item) { long x = atomic_long_read(&vm_node_stat[item]); #ifdef CONFIG_SMP @@ -202,6 +204,13 @@ static inline unsigned long global_node_page_state(enum node_stat_item item) return x; } +static inline unsigned long global_node_page_state(enum node_stat_item item) +{ + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); + + return global_node_page_state_pages(item); +} + static inline unsigned long zone_page_state(struct zone *zone, enum zone_stat_item item) { @@ -242,9 +251,12 @@ extern unsigned long sum_zone_node_page_state(int node, extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item); extern unsigned long node_page_state(struct pglist_data *pgdat, enum node_stat_item item); +extern unsigned long node_page_state_pages(struct pglist_data *pgdat, + enum node_stat_item item); #else #define sum_zone_node_page_state(node, item) global_zone_page_state(item) #define node_page_state(node, item) global_node_page_state(item) +#define node_page_state_pages(node, item) global_node_page_state_pages(item) #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index abf5bccf906a..349e39c3ab60 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -74,8 +74,6 @@ int con_set_default_unimap(struct vc_data *vc); void
con_free_unimap(struct vc_data *vc); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); -#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ - ((vc)->vc_toggle_meta ? 0x80 : 0)]) #else static inline int con_set_trans_old(unsigned char __user *table) { @@ -124,7 +122,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) return 0; } -#define vc_translate(vc, c) (c) #endif /* vt.c */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 1464ce6ffa31..9b19e6bb68b5 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -210,6 +210,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd, extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); +int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int); + /* devres register variant */ int devm_watchdog_register_device(struct device *dev, struct watchdog_device *); diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h index e497e621dbb7..3f496967b538 100644 --- a/include/linux/wkup_m3_ipc.h +++ b/include/linux/wkup_m3_ipc.h @@ -1,7 +1,7 @@ /* * TI Wakeup M3 for AMx3 SoCs Power Management Routines * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Dave Gerlach <d-gerlach@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index d7554252404c..850424e5d030 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -48,14 +48,6 @@ struct ww_acquire_ctx { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ , .ww_class = class diff --git a/include/linux/xattr.h b/include/linux/xattr.h index c5afaf8ca7a2..10b4dc2709f0 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -52,14 +52,18 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct dentry *, const char *); +int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); +int xattr_supported_namespace(struct inode *inode, const char *prefix); + static inline const char *xattr_prefix(const struct xattr_handler *handler) { return handler->prefix ?: handler->name; diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h index 52b073fea17f..df42511438d0 100644 --- a/include/linux/xxhash.h +++ b/include/linux/xxhash.h @@ -34,7 +34,7 @@ * ("BSD"). 
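Stepping back to the xattr.h change above: the new *_locked variants exist so a caller that already holds the inode lock can set or remove attributes and still cope with NFS delegations via the struct inode ** out-parameter. A sketch of the retry pattern such a caller might use, assuming a hypothetical example_setxattr(); __vfs_setxattr_locked() and break_deleg_wait() are real kernel interfaces:

#include <linux/fs.h>
#include <linux/xattr.h>

static int example_setxattr(struct dentry *dentry, const char *name,
			    const void *value, size_t size, int flags)
{
	struct inode *inode = d_inode(dentry);
	struct inode *delegated_inode = NULL;
	int error;

retry_deleg:
	inode_lock(inode);
	error = __vfs_setxattr_locked(dentry, name, value, size, flags,
				      &delegated_inode);
	inode_unlock(inode);

	if (delegated_inode) {
		/* A delegation was broken; wait for it, then try again. */
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	return error;
}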
* * You can contact the author at: - * - xxHash homepage: http://cyan4973.github.io/xxHash/ + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository: https://github.com/Cyan4973/xxHash */ diff --git a/include/linux/xz.h b/include/linux/xz.h index 64cffa6ddfce..9884c8440188 100644 --- a/include/linux/xz.h +++ b/include/linux/xz.h @@ -2,7 +2,7 @@ * XZ decompressor * * Authors: Lasse Collin <lasse.collin@tukaani.org> - * Igor Pavlov <http://7-zip.org/> + * Igor Pavlov <https://7-zip.org/> * * This file has been put into the public domain. * You can do whatever you want with this file. @@ -28,7 +28,7 @@ * enum xz_mode - Operation mode * * @XZ_SINGLE: Single-call mode. This uses less RAM than - * than multi-call modes, because the LZMA2 + * multi-call modes, because the LZMA2 * dictionary doesn't need to be allocated as * part of the decoder state. All required data * structures are allocated at initialization, diff --git a/include/linux/zlib.h b/include/linux/zlib.h index c757d848a758..78ede944c082 100644 --- a/include/linux/zlib.h +++ b/include/linux/zlib.h @@ -23,7 +23,7 @@ The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt + Comments) 1950 to 1952 in the files https://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). */
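Finally, the reworked virtio_config.h accessors above are easiest to see in use. A minimal sketch reading two virtio-net config fields, assuming the corresponding features were negotiated; read_net_config() is a hypothetical helper, while struct virtio_net_config, virtio_cread() and virtio_cread_le() are real:

#include <linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

static void read_net_config(struct virtio_device *vdev)
{
	u16 status;
	u32 speed;

	/* status is __virtio16: byte order follows the negotiated features */
	virtio_cread(vdev, struct virtio_net_config, status, &status);

	/* speed is __le32: always little-endian, so use the _le accessor */
	virtio_cread_le(vdev, struct virtio_net_config, speed, &speed);
}

The point of the _Generic() rework is that passing, say, a u32 pointer for the __virtio16 status member now trips the typecheck() at build time instead of silently reading the wrong width.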
