module: remove rmmod --wait option.
The option to wait for a module reference count to reach zero was in the initial module implementation, but it was never supported in modprobe (you had to use rmmod --wait). After discussion with Lucas, it has been deprecated (with a 10 second sleep) in kmod for the last year.

This finally removes it: the flag will evoke a printk warning and a normal (non-blocking) remove attempt.

Cc: Lucas De Marchi <lucas.de.marchi@gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
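For readers less familiar with the interface: both rmmod and modprobe remove modules through the delete_module(2) system call, and passing O_NONBLOCK in the flags argument is what this commit now treats as the only meaningful mode. The sketch below is purely illustrative and not part of the commit; the module name "dummy_mod" and the minimal error handling are assumptions. It shows what a non-blocking removal request looks like from userspace.

    /* Illustrative sketch only: issue a non-blocking module removal via
     * delete_module(2). "dummy_mod" is a placeholder name. After this
     * commit, omitting O_NONBLOCK no longer blocks until the refcount
     * reaches zero; the kernel prints a warning and still attempts a
     * normal (non-blocking) remove. Requires CAP_SYS_MODULE to succeed. */
    #include <fcntl.h>          /* O_NONBLOCK */
    #include <stdio.h>          /* perror */
    #include <sys/syscall.h>    /* SYS_delete_module */
    #include <unistd.h>         /* syscall */

    int main(void)
    {
            const char *name = "dummy_mod";   /* hypothetical module name */

            if (syscall(SYS_delete_module, name, O_NONBLOCK) != 0)
                    perror("delete_module");
            return 0;
    }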
parent d8524ae9d6
commit 3f2b9c9cdf
2 changed files with 8 additions and 41 deletions
include/linux/module.h
@@ -367,9 +367,6 @@ struct module
         /* What modules do I depend on? */
         struct list_head target_list;
 
-        /* Who is waiting for us to be unloaded */
-        struct task_struct *waiter;
-
         /* Destruction function. */
         void (*exit)(void);
 
kernel/module.c
@@ -644,8 +644,6 @@ static int module_unload_init(struct module *mod)
 
         /* Hold reference count during initialization. */
         __this_cpu_write(mod->refptr->incs, 1);
-        /* Backwards compatibility macros put refcount during init. */
-        mod->waiter = current;
 
         return 0;
 }
@@ -771,16 +769,9 @@ static int __try_stop_module(void *_sref)
 
 static int try_stop_module(struct module *mod, int flags, int *forced)
 {
-        if (flags & O_NONBLOCK) {
-                struct stopref sref = { mod, flags, forced };
+        struct stopref sref = { mod, flags, forced };
 
-                return stop_machine(__try_stop_module, &sref, NULL);
-        } else {
-                /* We don't need to stop the machine for this. */
-                mod->state = MODULE_STATE_GOING;
-                synchronize_sched();
-                return 0;
-        }
+        return stop_machine(__try_stop_module, &sref, NULL);
 }
 
 unsigned long module_refcount(struct module *mod)
@@ -813,21 +804,6 @@ EXPORT_SYMBOL(module_refcount);
 /* This exists whether we can unload or not */
 static void free_module(struct module *mod);
 
-static void wait_for_zero_refcount(struct module *mod)
-{
-        /* Since we might sleep for some time, release the mutex first */
-        mutex_unlock(&module_mutex);
-        for (;;) {
-                pr_debug("Looking at refcount...\n");
-                set_current_state(TASK_UNINTERRUPTIBLE);
-                if (module_refcount(mod) == 0)
-                        break;
-                schedule();
-        }
-        current->state = TASK_RUNNING;
-        mutex_lock(&module_mutex);
-}
-
 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                 unsigned int, flags)
 {
@@ -842,6 +818,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                 return -EFAULT;
         name[MODULE_NAME_LEN-1] = '\0';
 
+        if (!(flags & O_NONBLOCK)) {
+                printk(KERN_WARNING
+                       "waiting module removal not supported: please upgrade");
+        }
+
         if (mutex_lock_interruptible(&module_mutex) != 0)
                 return -EINTR;
 
@@ -859,8 +840,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 
         /* Doing init or already dying? */
         if (mod->state != MODULE_STATE_LIVE) {
-                /* FIXME: if (force), slam module count and wake up
-                   waiter --RR */
+                /* FIXME: if (force), slam module count damn the torpedoes */
                 pr_debug("%s already dying\n", mod->name);
                 ret = -EBUSY;
                 goto out;
@@ -876,18 +856,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                 }
         }
 
-        /* Set this up before setting mod->state */
-        mod->waiter = current;
-
         /* Stop the machine so refcounts can't move and disable module. */
         ret = try_stop_module(mod, flags, &forced);
         if (ret != 0)
                 goto out;
 
-        /* Never wait if forced. */
-        if (!forced && module_refcount(mod) != 0)
-                wait_for_zero_refcount(mod);
-
         mutex_unlock(&module_mutex);
         /* Final destruction now no one is using it. */
         if (mod->exit != NULL)
@@ -1005,9 +978,6 @@ void module_put(struct module *module)
                 __this_cpu_inc(module->refptr->decs);
 
                 trace_module_put(module, _RET_IP_);
-                /* Maybe they're waiting for us to drop reference? */
-                if (unlikely(!module_is_live(module)))
-                        wake_up_process(module->waiter);
                 preempt_enable();
         }
 }