diff options
Diffstat (limited to 'drivers/base/devcoredump.c')
| -rw-r--r-- | drivers/base/devcoredump.c | 136 | 
1 file changed, 83 insertions, 53 deletions
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index 37faf6156d7c..55bdc7f5e59d 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -23,50 +23,46 @@ struct devcd_entry {  	void *data;  	size_t datalen;  	/* -	 * Here, mutex is required to serialize the calls to del_wk work between -	 * user/kernel space which happens when devcd is added with device_add() -	 * and that sends uevent to user space. User space reads the uevents, -	 * and calls to devcd_data_write() which try to modify the work which is -	 * not even initialized/queued from devcoredump. +	 * There are 2 races for which mutex is required.  	 * +	 * The first race is between device creation and userspace writing to +	 * schedule immediate destruction.  	 * +	 * This race is handled by arming the timer before device creation, but +	 * when device creation fails the timer still exists.  	 * -	 *        cpu0(X)                                 cpu1(Y) +	 * To solve this, hold the mutex during device_add(), and set +	 * init_completed on success before releasing the mutex.  	 * -	 *        dev_coredump() uevent sent to user space -	 *        device_add()  ======================> user space process Y reads the -	 *                                              uevents writes to devcd fd -	 *                                              which results into writes to +	 * That way the timer will never fire until device_add() is called, +	 * it will do nothing if init_completed is not set. The timer is also +	 * cancelled in that case.  	 
* -	 *                                             devcd_data_write() -	 *                                               mod_delayed_work() -	 *                                                 try_to_grab_pending() -	 *                                                   timer_delete() -	 *                                                     debug_assert_init() -	 *       INIT_DELAYED_WORK() -	 *       schedule_delayed_work() -	 * -	 * -	 * Also, mutex alone would not be enough to avoid scheduling of -	 * del_wk work after it get flush from a call to devcd_free() -	 * mentioned as below. -	 * -	 *	disabled_store() -	 *        devcd_free() -	 *          mutex_lock()             devcd_data_write() -	 *          flush_delayed_work() -	 *          mutex_unlock() -	 *                                   mutex_lock() -	 *                                   mod_delayed_work() -	 *                                   mutex_unlock() -	 * So, delete_work flag is required. +	 * The second race involves multiple parallel invocations of devcd_free(); +	 * add a deleted flag so only 1 can call the destructor.  	 */  	struct mutex mutex; -	bool delete_work; +	bool init_completed, deleted;  	struct module *owner;  	ssize_t (*read)(char *buffer, loff_t offset, size_t count,  			void *data, size_t datalen);  	void (*free)(void *data); +	/* +	 * If nothing interferes and device_add() returns success, +	 * del_wk will destroy the device after the timer fires. +	 * +	 * Multiple userspace processes can interfere in the working of the timer: +	 * - Writing to the coredump will reschedule the timer to run immediately, +	 *   if still armed. +	 * +	 *   This is handled by using "if (cancel_delayed_work()) { +	 *   schedule_delayed_work() }", to prevent re-arming after having +	 *   been previously fired. +	 * - Writing to /sys/class/devcoredump/disabled will destroy the +	 *   coredump synchronously. 
+	 *   This is handled by using disable_delayed_work_sync(), and then +	 *   checking if deleted flag is set with &devcd->mutex held. +	 */  	struct delayed_work del_wk;  	struct device *failing_dev;  }; @@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)  	kfree(devcd);  } +static void __devcd_del(struct devcd_entry *devcd) +{ +	devcd->deleted = true; +	device_del(&devcd->devcd_dev); +	put_device(&devcd->devcd_dev); +} +  static void devcd_del(struct work_struct *wk)  {  	struct devcd_entry *devcd; +	bool init_completed;  	devcd = container_of(wk, struct devcd_entry, del_wk.work); -	device_del(&devcd->devcd_dev); -	put_device(&devcd->devcd_dev); +	/* devcd->mutex serializes against dev_coredumpm_timeout */ +	mutex_lock(&devcd->mutex); +	init_completed = devcd->init_completed; +	mutex_unlock(&devcd->mutex); + +	if (init_completed) +		__devcd_del(devcd);  }  static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj, @@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,  	struct device *dev = kobj_to_dev(kobj);  	struct devcd_entry *devcd = dev_to_devcd(dev); -	mutex_lock(&devcd->mutex); -	if (!devcd->delete_work) { -		devcd->delete_work = true; -		mod_delayed_work(system_wq, &devcd->del_wk, 0); -	} -	mutex_unlock(&devcd->mutex); +	/* +	 * Although it's tempting to use mod_delayed work here, +	 * that will cause a reschedule if the timer already fired. +	 */ +	if (cancel_delayed_work(&devcd->del_wk)) +		schedule_delayed_work(&devcd->del_wk, 0);  	return count;  } @@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)  {  	struct devcd_entry *devcd = dev_to_devcd(dev); +	/* +	 * To prevent a race with devcd_data_write(), disable work and +	 * complete manually instead. +	 * +	 * We cannot rely on the return value of +	 * disable_delayed_work_sync() here, because it might be in the +	 * middle of a cancel_delayed_work + schedule_delayed_work pair. 
+	 * +	 * devcd->mutex here guards against multiple parallel invocations +	 * of devcd_free(). +	 */ +	disable_delayed_work_sync(&devcd->del_wk);  	mutex_lock(&devcd->mutex); -	if (!devcd->delete_work) -		devcd->delete_work = true; - -	flush_delayed_work(&devcd->del_wk); +	if (!devcd->deleted) +		__devcd_del(devcd);  	mutex_unlock(&devcd->mutex);  	return 0;  } @@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri   *                                                                 put_device() <- last reference   *             error = fn(dev, data)                           devcd_dev_release()   *             devcd_free(dev, data)                           kfree(devcd) - *             mutex_lock(&devcd->mutex);   *   *   * In the above diagram, it looks like disabled_store() would be racing with parallelly - * running devcd_del() and result in memory abort while acquiring devcd->mutex which - * is called after kfree of devcd memory after dropping its last reference with + * running devcd_del() and result in memory abort after dropping its last reference with   * put_device(). However, this will not happens as fn(dev, data) runs   * with its own reference to device via klist_node so it is not its last reference.   * so, above situation would not occur. 
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,  	devcd->read = read;  	devcd->free = free;  	devcd->failing_dev = get_device(dev); -	devcd->delete_work = false; +	devcd->deleted = false;  	mutex_init(&devcd->mutex);  	device_initialize(&devcd->devcd_dev); @@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,  		     atomic_inc_return(&devcd_count));  	devcd->devcd_dev.class = &devcd_class; -	mutex_lock(&devcd->mutex);  	dev_set_uevent_suppress(&devcd->devcd_dev, true); + +	/* devcd->mutex prevents devcd_del() completing until init finishes */ +	mutex_lock(&devcd->mutex); +	devcd->init_completed = false; +	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); +	schedule_delayed_work(&devcd->del_wk, timeout); +  	if (device_add(&devcd->devcd_dev))  		goto put_device; @@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,  	dev_set_uevent_suppress(&devcd->devcd_dev, false);  	kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); -	INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); -	schedule_delayed_work(&devcd->del_wk, timeout); + +	/* +	 * Safe to run devcd_del() now that we are done with devcd_dev. +	 * Alternatively we could have taken a ref on devcd_dev before +	 * dropping the lock. +	 */ +	devcd->init_completed = true;  	mutex_unlock(&devcd->mutex);  	return;   put_device: -	put_device(&devcd->devcd_dev);  	mutex_unlock(&devcd->mutex); +	cancel_delayed_work_sync(&devcd->del_wk); +	put_device(&devcd->devcd_dev); +   put_module:  	module_put(owner);   free:  | 
