Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/ (synced 2025-04-19 20:58:31 +09:00)
Modules changes for 6.15-rc1
- Use RCU instead of RCU-sched

  The mix of rcu_read_lock(), rcu_read_lock_sched() and preempt_disable()
  in the module code and its users has been replaced with just
  rcu_read_lock().

- The rest of changes are smaller fixes and updates.

The changes have been on linux-next for at least 2 weeks, with the RCU
cleanup present for 2 months. One performance problem was reported with
the RCU change when KASAN + lockdep were enabled, but it was effectively
addressed by the already merged ee57ab5a3212 ("locking/lockdep: Disable
KASAN instrumentation of lockdep.c").

-----BEGIN PGP SIGNATURE-----

iQFIBAABCAAyFiEEIduBR9MnFA82q/jtumpXJwqY6poFAmfmwrsUHHBldHIucGF2
bHVAc3VzZS5jb20ACgkQumpXJwqY6prWxgf/S7Pvdywm10vJ6fooYa+GxXNMwhyh
XRjZ4m9gjeTNf2KLwX0XHv0XZeFHOmHfjd3iI+pS6CXZnCFTN9J3XPLYsrTxXUb6
U6zzLf8Zsz8TzeI4dgvSBsZln7oICSACkAgdJCq23hpNKeaeRo91dgiZaIwyZJG3
FekqSFtP7pYhfFoNkrFKysqbgl1+RWWZ79L2qRJA0bPzVFlvRUuh6cOHQw+8RMqf
BYLwnArjTkW8AcXpxIXSiwphDHVZ81B96xoplavyoprA5FDpv1W+8y4DtxdWFn+1
QVWCs/ZV3KrwXWpZev625w3fIOOIXILqRINOzLfvXTw+1xFS3TzSQEpVeg==
=4OKc
-----END PGP SIGNATURE-----

Merge tag 'modules-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/modules/linux

Pull modules updates from Petr Pavlu:

 - Use RCU instead of RCU-sched

   The mix of rcu_read_lock(), rcu_read_lock_sched() and
   preempt_disable() in the module code and its users has been replaced
   with just rcu_read_lock()

 - The rest of changes are smaller fixes and updates

* tag 'modules-6.15-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/modules/linux: (32 commits)
  MAINTAINERS: Update the MODULE SUPPORT section
  module: Remove unnecessary size argument when calling strscpy()
  module: Replace deprecated strncpy() with strscpy()
  params: Annotate struct module_param_attrs with __counted_by()
  bug: Use RCU instead RCU-sched to protect module_bug_list.
  static_call: Use RCU in all users of __module_text_address().
  kprobes: Use RCU in all users of __module_text_address().
  bpf: Use RCU in all users of __module_text_address().
  jump_label: Use RCU in all users of __module_text_address().
  jump_label: Use RCU in all users of __module_address().
  x86: Use RCU in all users of __module_address().
  cfi: Use RCU while invoking __module_address().
  powerpc/ftrace: Use RCU in all users of __module_text_address().
  LoongArch: ftrace: Use RCU in all users of __module_text_address().
  LoongArch/orc: Use RCU in all users of __module_address().
  arm64: module: Use RCU in all users of __module_text_address().
  ARM: module: Use RCU in all users of __module_text_address().
  module: Use RCU in all users of __module_text_address().
  module: Use RCU in all users of __module_address().
  module: Use RCU in search_module_extables().
  ...
This commit is contained in:
commit 01d5b167dc
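For context before the diff: every conversion in this series follows the same pattern, replacing an explicit preempt_disable()/preempt_enable() or rcu_read_lock_sched()/rcu_read_unlock_sched() pair around a module lookup with a scope-based RCU read section built on guard(rcu)() or scoped_guard(rcu) from <linux/cleanup.h>/<linux/rcupdate.h>. The sketch below is illustrative only; the helper name is made up and does not appear in this series:

#include <linux/cleanup.h>	/* guard(), scoped_guard() */
#include <linux/module.h>	/* __module_text_address(), try_module_get() */

/* Hypothetical helper, shown only to illustrate the conversion pattern. */
static struct module *example_get_module_of(unsigned long addr)
{
	struct module *mod;

	/*
	 * Before the series: preempt_disable()/preempt_enable() (or the
	 * rcu_read_lock_sched() variants) bracketed the lookup.  Now an RCU
	 * read section is held for the scope of the guard and dropped
	 * automatically when the block is left.
	 */
	scoped_guard(rcu) {
		mod = __module_text_address(addr);
		if (mod && !try_module_get(mod))
			mod = NULL;
	}

	return mod;	/* caller holds a reference, or NULL */
}

Functions that need the RCU read section until they return use guard(rcu)(); instead, which covers the remainder of the enclosing scope without an explicit block.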
@@ -16210,7 +16210,7 @@ F: include/dt-bindings/clock/mobileye,eyeq5-clk.h
MODULE SUPPORT
M: Luis Chamberlain <mcgrof@kernel.org>
R: Petr Pavlu <petr.pavlu@suse.com>
M: Petr Pavlu <petr.pavlu@suse.com>
R: Sami Tolvanen <samitolvanen@google.com>
R: Daniel Gomez <da.gomez@samsung.com>
L: linux-modules@vger.kernel.org
@@ -16221,8 +16221,10 @@ F: include/linux/kmod.h
F: include/linux/module*.h
F: kernel/module/
F: lib/test_kmod.c
F: lib/tests/module/
F: scripts/module*
F: tools/testing/selftests/kmod/
F: tools/testing/selftests/module/

MONOLITHIC POWER SYSTEM PMIC DRIVER
M: Saravanan Sekar <sravanhome@gmail.com>
@@ -285,11 +285,9 @@ bool in_module_plt(unsigned long loc)
struct module *mod;
bool ret;
preempt_disable();
guard(rcu)();
mod = __module_text_address(loc);
ret = mod && (loc - (u32)mod->arch.core.plt_ent < mod->arch.core.plt_count * PLT_ENT_SIZE ||
loc - (u32)mod->arch.init.plt_ent < mod->arch.init.plt_count * PLT_ENT_SIZE);
preempt_enable();
return ret;
}
@@ -320,14 +320,13 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec,
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*
* NOTE: __module_text_address() must be called with preemption
* disabled, but we can rely on ftrace_lock to ensure that 'mod'
* NOTE: __module_text_address() must be called within a RCU read
* section, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code.
*/
if (!mod) {
preempt_disable();
guard(rcu)();
mod = __module_text_address(pc);
preempt_enable();
}
if (WARN_ON(!mod))
@@ -85,14 +85,13 @@ static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod
* dealing with an out-of-range condition, we can assume it
* is due to a module being loaded far away from the kernel.
*
* NOTE: __module_text_address() must be called with preemption
* disabled, but we can rely on ftrace_lock to ensure that 'mod'
* NOTE: __module_text_address() must be called within a RCU read
* section, but we can rely on ftrace_lock to ensure that 'mod'
* retains its validity throughout the remainder of this code.
*/
if (!mod) {
preempt_disable();
mod = __module_text_address(pc);
preempt_enable();
scoped_guard(rcu)
mod = __module_text_address(pc);
}
if (WARN_ON(!mod))
@@ -399,7 +399,7 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
/* Don't let modules unload while we're reading their ORC data. */
preempt_disable();
guard(rcu)();
if (is_entry_func(state->pc))
goto end;
@@ -514,14 +514,12 @@ bool unwind_next_frame(struct unwind_state *state)
if (!__kernel_text_address(state->pc))
goto err;
preempt_enable();
return true;
err:
state->error = true;
end:
preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
@@ -115,10 +115,8 @@ static unsigned long ftrace_lookup_module_stub(unsigned long ip, unsigned long a
{
struct module *mod = NULL;
preempt_disable();
mod = __module_text_address(ip);
preempt_enable();
scoped_guard(rcu)
mod = __module_text_address(ip);
if (!mod)
pr_err("No module loaded at addr=%lx\n", ip);
@@ -120,10 +120,8 @@ static struct module *ftrace_lookup_module(struct dyn_ftrace *rec)
{
struct module *mod;
preempt_disable();
mod = __module_text_address(rec->ip);
preempt_enable();
scoped_guard(rcu)
mod = __module_text_address(rec->ip);
if (!mod)
pr_err("No module loaded at addr=%lx\n", rec->ip);
@@ -98,11 +98,10 @@ static inline bool within_module_coretext(void *addr)
#ifdef CONFIG_MODULES
struct module *mod;
preempt_disable();
guard(rcu)();
mod = __module_address((unsigned long)addr);
if (mod && within_module_core((unsigned long)addr, mod))
ret = true;
preempt_enable();
#endif
return ret;
}
@@ -476,7 +476,7 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
/* Don't let modules unload while we're reading their ORC data. */
preempt_disable();
guard(rcu)();
/* End-of-stack check for user tasks: */
if (state->regs && user_mode(state->regs))
@@ -669,14 +669,12 @@ bool unwind_next_frame(struct unwind_state *state)
goto err;
}
preempt_enable();
return true;
err:
state->error = true;
the_end:
preempt_enable();
state->stack_info.type = STACK_TYPE_UNKNOWN;
return false;
}
@@ -55,12 +55,11 @@ static inline void *dereference_symbol_descriptor(void *ptr)
if (is_ksym_addr((unsigned long)ptr))
return ptr;
preempt_disable();
guard(rcu)();
mod = __module_address((unsigned long)ptr);
if (mod)
ptr = dereference_module_function_descriptor(mod, ptr);
preempt_enable();
#endif
return ptr;
}
@@ -665,7 +665,7 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
return within_module_init(addr, mod) || within_module_core(addr, mod);
}
/* Search for module by name: must be in a RCU-sched critical section. */
/* Search for module by name: must be in a RCU critical section. */
struct module *find_module(const char *name);
extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
@@ -73,14 +73,11 @@ static bool is_module_cfi_trap(unsigned long addr)
struct module *mod;
bool found = false;
rcu_read_lock_sched_notrace();
guard(rcu)();
mod = __module_address(addr);
if (mod)
found = is_trap(addr, mod->kcfi_traps, mod->kcfi_traps_end);
rcu_read_unlock_sched_notrace();
return found;
}
#else /* CONFIG_MODULES */
@@ -653,13 +653,12 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
struct module *mod;
int ret;
preempt_disable();
mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
if (!try_module_get(mod))
mod = NULL;
preempt_enable();
scoped_guard(rcu) {
mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
if (!try_module_get(mod))
mod = NULL;
}
if (!mod)
return 0;
@@ -746,9 +745,9 @@ static int jump_label_add_module(struct module *mod)
kfree(jlm);
return -ENOMEM;
}
preempt_disable();
jlm2->mod = __module_address((unsigned long)key);
preempt_enable();
scoped_guard(rcu)
jlm2->mod = __module_address((unsigned long)key);
jlm2->entries = static_key_entries(key);
jlm2->next = NULL;
static_key_set_mod(key, jlm2);
@@ -906,13 +905,13 @@ static void jump_label_update(struct static_key *key)
return;
}
preempt_disable();
mod = __module_address((unsigned long)key);
if (mod) {
stop = mod->jump_entries + mod->num_jump_entries;
init = mod->state == MODULE_STATE_COMING;
scoped_guard(rcu) {
mod = __module_address((unsigned long)key);
if (mod) {
stop = mod->jump_entries + mod->num_jump_entries;
init = mod->state == MODULE_STATE_COMING;
}
}
preempt_enable();
#endif
entry = static_key_entries(key);
/* if there are no users, entry can be NULL */
@@ -1547,7 +1547,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
/* Ensure the address is in a text area, and find a module if exists. */
*probed_mod = NULL;
if (!core_kernel_text((unsigned long) p->addr)) {
guard(preempt)();
guard(rcu)();
*probed_mod = __module_text_address((unsigned long) p->addr);
if (!(*probed_mod))
return -EINVAL;
@@ -59,7 +59,7 @@ static void klp_find_object_module(struct klp_object *obj)
if (!klp_is_module(obj))
return;
rcu_read_lock_sched();
guard(rcu)();
/*
* We do not want to block removal of patched modules and therefore
* we do not take a reference here. The patches are removed by
@@ -75,8 +75,6 @@ static void klp_find_object_module(struct klp_object *obj)
*/
if (mod && mod->klp_alive)
obj->mod = mod;
rcu_read_unlock_sched();
}
static bool klp_initialized(void)
@@ -124,17 +124,6 @@ char *module_next_tag_pair(char *string, unsigned long *secsize);
#define for_each_modinfo_entry(entry, info, name) \
for (entry = get_modinfo(info, name); entry; entry = get_next_modinfo(info, name, entry))
static inline void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
if (unlikely(!debug_locks))
return;
WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
!lockdep_is_held(&module_mutex));
#endif
}
static inline unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
{
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
@@ -177,19 +177,15 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
unsigned long strtab_size;
void *data_base = mod->mem[MOD_DATA].base;
void *init_data_base = mod->mem[MOD_INIT_DATA].base;
struct mod_kallsyms *kallsyms;
/* Set up to point into init section. */
mod->kallsyms = (void __rcu *)init_data_base +
info->mod_kallsyms_init_off;
kallsyms = init_data_base + info->mod_kallsyms_init_off;
rcu_read_lock();
/* The following is safe since this pointer cannot change */
rcu_dereference(mod->kallsyms)->symtab = (void *)symsec->sh_addr;
rcu_dereference(mod->kallsyms)->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
kallsyms->symtab = (void *)symsec->sh_addr;
kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
/* Make sure we get permanent strtab: don't use info->strtab. */
rcu_dereference(mod->kallsyms)->strtab =
(void *)info->sechdrs[info->index.str].sh_addr;
rcu_dereference(mod->kallsyms)->typetab = init_data_base + info->init_typeoffs;
kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
kallsyms->typetab = init_data_base + info->init_typeoffs;
/*
* Now populate the cut down core kallsyms for after init
@@ -199,20 +195,19 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
mod->core_kallsyms.strtab = s = data_base + info->stroffs;
mod->core_kallsyms.typetab = data_base + info->core_typeoffs;
strtab_size = info->core_typeoffs - info->stroffs;
src = rcu_dereference(mod->kallsyms)->symtab;
for (ndst = i = 0; i < rcu_dereference(mod->kallsyms)->num_symtab; i++) {
rcu_dereference(mod->kallsyms)->typetab[i] = elf_type(src + i, info);
src = kallsyms->symtab;
for (ndst = i = 0; i < kallsyms->num_symtab; i++) {
kallsyms->typetab[i] = elf_type(src + i, info);
if (i == 0 || is_livepatch_module(mod) ||
is_core_symbol(src + i, info->sechdrs, info->hdr->e_shnum,
info->index.pcpu)) {
ssize_t ret;
mod->core_kallsyms.typetab[ndst] =
rcu_dereference(mod->kallsyms)->typetab[i];
kallsyms->typetab[i];
dst[ndst] = src[i];
dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
ret = strscpy(s,
&rcu_dereference(mod->kallsyms)->strtab[src[i].st_name],
ret = strscpy(s, &kallsyms->strtab[src[i].st_name],
strtab_size);
if (ret < 0)
break;
@@ -220,7 +215,9 @@ void add_kallsyms(struct module *mod, const struct load_info *info)
strtab_size -= ret + 1;
}
}
rcu_read_unlock();
/* Set up to point into init section. */
rcu_assign_pointer(mod->kallsyms, kallsyms);
mod->core_kallsyms.num_symtab = ndst;
}
@@ -260,7 +257,7 @@ static const char *find_kallsyms_symbol(struct module *mod,
{
unsigned int i, best = 0;
unsigned long nextval, bestval;
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms);
struct module_memory *mod_mem;
/* At worse, next value is at end of module */
@@ -319,7 +316,7 @@ void * __weak dereference_module_function_descriptor(struct module *mod,
/*
* For kallsyms to ask for address resolution. NULL means not found. Careful
* not to lock to avoid deadlock on oopses, simply disable preemption.
* not to lock to avoid deadlock on oopses, RCU is enough.
*/
int module_address_lookup(unsigned long addr,
unsigned long *size,
@@ -332,7 +329,7 @@ int module_address_lookup(unsigned long addr,
int ret = 0;
struct module *mod;
preempt_disable();
guard(rcu)();
mod = __module_address(addr);
if (mod) {
if (modname)
@@ -350,8 +347,6 @@ int module_address_lookup(unsigned long addr,
if (sym)
ret = strscpy(namebuf, sym, KSYM_NAME_LEN);
}
preempt_enable();
return ret;
}
@@ -359,7 +354,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
{
struct module *mod;
preempt_disable();
guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -371,12 +366,10 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
goto out;
strscpy(symname, sym, KSYM_NAME_LEN);
preempt_enable();
return 0;
}
}
out:
preempt_enable();
return -ERANGE;
}
@@ -385,13 +378,13 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
{
struct module *mod;
preempt_disable();
guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
struct mod_kallsyms *kallsyms;
if (mod->state == MODULE_STATE_UNFORMED)
continue;
kallsyms = rcu_dereference_sched(mod->kallsyms);
kallsyms = rcu_dereference(mod->kallsyms);
if (symnum < kallsyms->num_symtab) {
const Elf_Sym *sym = &kallsyms->symtab[symnum];
@@ -400,12 +393,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
strscpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
strscpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, *value, mod);
preempt_enable();
return 0;
}
symnum -= kallsyms->num_symtab;
}
preempt_enable();
return -ERANGE;
}
@@ -413,7 +404,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
static unsigned long __find_kallsyms_symbol_value(struct module *mod, const char *name)
{
unsigned int i;
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
struct mod_kallsyms *kallsyms = rcu_dereference(mod->kallsyms);
for (i = 0; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
@@ -453,23 +444,15 @@ static unsigned long __module_kallsyms_lookup_name(const char *name)
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
unsigned long ret;
/* Don't lock: we're in enough trouble already. */
preempt_disable();
ret = __module_kallsyms_lookup_name(name);
preempt_enable();
return ret;
guard(rcu)();
return __module_kallsyms_lookup_name(name);
}
unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
{
unsigned long ret;
preempt_disable();
ret = __find_kallsyms_symbol_value(mod, name);
preempt_enable();
return ret;
guard(rcu)();
return __find_kallsyms_symbol_value(mod, name);
}
int module_kallsyms_on_each_symbol(const char *modname,
@@ -490,10 +473,8 @@ int module_kallsyms_on_each_symbol(const char *modname,
if (modname && strcmp(modname, mod->name))
continue;
/* Use rcu_dereference_sched() to remain compliant with the sparse tool */
preempt_disable();
kallsyms = rcu_dereference_sched(mod->kallsyms);
preempt_enable();
kallsyms = rcu_dereference_check(mod->kallsyms,
lockdep_is_held(&module_mutex));
for (i = 0; i < kallsyms->num_symtab; i++) {
const Elf_Sym *sym = &kallsyms->symtab[i];
@@ -67,7 +67,7 @@
/*
* Mutex protects:
* 1) List of modules (also safely readable with preempt_disable),
* 1) List of modules (also safely readable within RCU read section),
* 2) module_use links,
* 3) mod_tree.addr_min/mod_tree.addr_max.
* (delete and add uses RCU list operations).
@@ -331,7 +331,7 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms,
/*
* Find an exported symbol and return it, along with, (optional) crc and
* (optional) module which owns it. Needs preempt disabled or module_mutex.
* (optional) module which owns it. Needs RCU or module_mutex.
*/
bool find_symbol(struct find_symbol_arg *fsa)
{
@@ -345,8 +345,6 @@ bool find_symbol(struct find_symbol_arg *fsa)
struct module *mod;
unsigned int i;
module_assert_mutex_or_preempt();
for (i = 0; i < ARRAY_SIZE(arr); i++)
if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
return true;
@@ -374,16 +372,14 @@ bool find_symbol(struct find_symbol_arg *fsa)
}
/*
* Search for module by name: must hold module_mutex (or preempt disabled
* for read-only access).
* Search for module by name: must hold module_mutex (or RCU for read-only
* access).
*/
struct module *find_module_all(const char *name, size_t len,
bool even_unformed)
{
struct module *mod;
module_assert_mutex_or_preempt();
list_for_each_entry_rcu(mod, &modules, list,
lockdep_is_held(&module_mutex)) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -454,8 +450,7 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
struct module *mod;
unsigned int cpu;
preempt_disable();
guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -472,13 +467,10 @@ bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
per_cpu_ptr(mod->percpu,
get_boot_cpu_id());
}
preempt_enable();
return true;
}
}
}
preempt_enable();
return false;
}
@@ -795,8 +787,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
async_synchronize_full();
/* Store the name and taints of the last unloaded module for diagnostic purposes */
strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
strscpy(last_unloaded_module.name, mod->name);
strscpy(last_unloaded_module.taints, module_flags(mod, buf, false));
free_module(mod);
/* someone could wait for the module in add_unformed_module() */
@@ -814,10 +806,9 @@ void __symbol_put(const char *symbol)
.gplok = true,
};
preempt_disable();
guard(rcu)();
BUG_ON(!find_symbol(&fsa));
module_put(fsa.owner);
preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);
@@ -832,13 +823,12 @@ void symbol_put_addr(void *addr)
/*
* Even though we hold a reference on the module; we still need to
* disable preemption in order to safely traverse the data structure.
* RCU read section in order to safely traverse the data structure.
*/
preempt_disable();
guard(rcu)();
modaddr = __module_text_address(a);
BUG_ON(!modaddr);
module_put(modaddr);
preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
@@ -1189,7 +1179,7 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
getname:
/* We must make copy under the lock if we failed to get ref. */
strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
strscpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
unlock:
mutex_unlock(&module_mutex);
return fsa.sym;
@@ -1341,7 +1331,7 @@ static void free_module(struct module *mod)
mod_tree_remove(mod);
/* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod);
/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
/* Wait for RCU synchronizing before releasing mod->list and buglist. */
synchronize_rcu();
if (try_add_tainted_module(mod))
pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
@@ -1364,21 +1354,18 @@ void *__symbol_get(const char *symbol)
.warn = true,
};
preempt_disable();
if (!find_symbol(&fsa))
goto fail;
if (fsa.license != GPL_ONLY) {
pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
symbol);
goto fail;
scoped_guard(rcu) {
if (!find_symbol(&fsa))
return NULL;
if (fsa.license != GPL_ONLY) {
pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
symbol);
return NULL;
}
if (strong_try_module_get(fsa.owner))
return NULL;
}
if (strong_try_module_get(fsa.owner))
goto fail;
preempt_enable();
return (void *)kernel_symbol_value(fsa.sym);
fail:
preempt_enable();
return NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
@@ -3013,7 +3000,7 @@ static noinline int do_init_module(struct module *mod)
#endif
/*
* We want to free module_init, but be aware that kallsyms may be
* walking this with preempt disabled. In all the failure paths, we
* walking this within an RCU read section. In all the failure paths, we
* call synchronize_rcu(), but we don't want to slow down the success
* path. execmem_free() cannot be called in an interrupt, so do the
* work and call synchronize_rcu() in a work queue.
@@ -3680,28 +3667,23 @@ out:
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
const struct exception_table_entry *e = NULL;
struct module *mod;
preempt_disable();
guard(rcu)();
mod = __module_address(addr);
if (!mod)
goto out;
return NULL;
if (!mod->num_exentries)
goto out;
e = search_extable(mod->extable,
mod->num_exentries,
addr);
out:
preempt_enable();
return NULL;
/*
* Now, if we found one, we are running inside it now, hence
* we cannot unload the module, hence no refcnt needed.
* The address passed here belongs to a module that is currently
* invoked (we are running inside it). Therefore its module::refcnt
* needs already be >0 to ensure that it is not removed at this stage.
* All other user need to invoke this function within a RCU read
* section.
*/
return e;
return search_extable(mod->extable, mod->num_exentries, addr);
}
/**
@@ -3713,20 +3695,15 @@ out:
*/
bool is_module_address(unsigned long addr)
{
bool ret;
preempt_disable();
ret = __module_address(addr) != NULL;
preempt_enable();
return ret;
guard(rcu)();
return __module_address(addr) != NULL;
}
/**
* __module_address() - get the module which contains an address.
* @addr: the address.
*
* Must be called with preempt disabled or module mutex held so that
* Must be called within RCU read section or module mutex held so that
* module doesn't get freed during this.
*/
struct module *__module_address(unsigned long addr)
@@ -3744,8 +3721,6 @@ struct module *__module_address(unsigned long addr)
return NULL;
lookup:
module_assert_mutex_or_preempt();
mod = mod_find(addr, &mod_tree);
if (mod) {
BUG_ON(!within_module(addr, mod));
@@ -3765,20 +3740,15 @@ lookup:
*/
bool is_module_text_address(unsigned long addr)
{
bool ret;
preempt_disable();
ret = __module_text_address(addr) != NULL;
preempt_enable();
return ret;
guard(rcu)();
return __module_text_address(addr) != NULL;
}
/**
* __module_text_address() - get the module whose code contains an address.
* @addr: the address.
*
* Must be called with preempt disabled or module mutex held so that
* Must be called within RCU read section or module mutex held so that
* module doesn't get freed during this.
*/
struct module *__module_text_address(unsigned long addr)
@@ -3801,7 +3771,7 @@ void print_modules(void)
printk(KERN_DEFAULT "Modules linked in:");
/* Most callers should already have preempt disabled, but make sure */
preempt_disable();
guard(rcu)();
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -3809,7 +3779,6 @@ void print_modules(void)
}
print_unloaded_tainted_modules();
preempt_enable();
if (last_unloaded_module.name[0])
pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
last_unloaded_module.taints);
@@ -21,8 +21,6 @@ int try_add_tainted_module(struct module *mod)
{
struct mod_unload_taint *mod_taint;
module_assert_mutex_or_preempt();
if (!mod->taints)
goto out;
@@ -12,11 +12,11 @@
/*
* Use a latched RB-tree for __module_address(); this allows us to use
* RCU-sched lookups of the address from any context.
* RCU lookups of the address from any context.
*
* This is conditional on PERF_EVENTS || TRACING because those can really hit
* __module_address() hard by doing a lot of stack unwinding; potentially from
* NMI context.
* This is conditional on PERF_EVENTS || TRACING || CFI_CLANG because those can
* really hit __module_address() hard by doing a lot of stack unwinding;
* potentially from NMI context.
*/
static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
@@ -79,17 +79,17 @@ int check_modstruct_version(const struct load_info *info,
.name = "module_layout",
.gplok = true,
};
bool have_symbol;
/*
* Since this should be found in kernel (which can't be removed), no
* locking is necessary -- use preempt_disable() to placate lockdep.
* locking is necessary. Regardless use a RCU read section to keep
* lockdep happy.
*/
preempt_disable();
if (!find_symbol(&fsa)) {
preempt_enable();
BUG();
}
preempt_enable();
scoped_guard(rcu)
have_symbol = find_symbol(&fsa);
BUG_ON(!have_symbol);
return check_version(info, "module_layout", mod, fsa.crc);
}
@@ -551,7 +551,7 @@ struct module_param_attrs
{
unsigned int num;
struct attribute_group grp;
struct param_attribute attrs[];
struct param_attribute attrs[] __counted_by(num);
};
#ifdef CONFIG_SYSFS
@@ -651,35 +651,32 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
}
/* Enlarge allocations. */
new_mp = krealloc(mk->mp,
sizeof(*mk->mp) +
sizeof(mk->mp->attrs[0]) * (mk->mp->num + 1),
new_mp = krealloc(mk->mp, struct_size(mk->mp, attrs, mk->mp->num + 1),
GFP_KERNEL);
if (!new_mp)
return -ENOMEM;
mk->mp = new_mp;
mk->mp->num++;
/* Extra pointer for NULL terminator */
new_attrs = krealloc(mk->mp->grp.attrs,
sizeof(mk->mp->grp.attrs[0]) * (mk->mp->num + 2),
GFP_KERNEL);
new_attrs = krealloc_array(mk->mp->grp.attrs, mk->mp->num + 1,
sizeof(mk->mp->grp.attrs[0]), GFP_KERNEL);
if (!new_attrs)
return -ENOMEM;
mk->mp->grp.attrs = new_attrs;
/* Tack new one on the end. */
memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
mk->mp->attrs[mk->mp->num].param = kp;
mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
memset(&mk->mp->attrs[mk->mp->num - 1], 0, sizeof(mk->mp->attrs[0]));
sysfs_attr_init(&mk->mp->attrs[mk->mp->num - 1].mattr.attr);
mk->mp->attrs[mk->mp->num - 1].param = kp;
mk->mp->attrs[mk->mp->num - 1].mattr.show = param_attr_show;
/* Do not allow runtime DAC changes to make param writable. */
if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
mk->mp->attrs[mk->mp->num - 1].mattr.store = param_attr_store;
else
mk->mp->attrs[mk->mp->num].mattr.store = NULL;
mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
mk->mp->num++;
mk->mp->attrs[mk->mp->num - 1].mattr.store = NULL;
mk->mp->attrs[mk->mp->num - 1].mattr.attr.name = (char *)name;
mk->mp->attrs[mk->mp->num - 1].mattr.attr.mode = kp->perm;
/* Fix up all the pointers, since krealloc can move us */
for (i = 0; i < mk->mp->num; i++)
@@ -325,13 +325,12 @@ static int __static_call_mod_text_reserved(void *start, void *end)
struct module *mod;
int ret;
preempt_disable();
mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
if (!try_module_get(mod))
mod = NULL;
preempt_enable();
scoped_guard(rcu) {
mod = __module_text_address((unsigned long)start);
WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
if (!try_module_get(mod))
mod = NULL;
}
if (!mod)
return 0;
@@ -2338,10 +2338,9 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
struct module *mod;
preempt_disable();
guard(rcu)();
mod = __module_address((unsigned long)btp);
module_put(mod);
preempt_enable();
}
static __always_inline
@@ -2925,18 +2924,21 @@ static int get_modules_for_addrs(struct module ***mods, unsigned long *addrs, u3
u32 i, err = 0;
for (i = 0; i < addrs_cnt; i++) {
bool skip_add = false;
struct module *mod;
preempt_disable();
mod = __module_address(addrs[i]);
/* Either no module or we it's already stored */
if (!mod || has_module(&arr, mod)) {
preempt_enable();
continue;
scoped_guard(rcu) {
mod = __module_address(addrs[i]);
/* Either no module or it's already stored */
if (!mod || has_module(&arr, mod)) {
skip_add = true;
break; /* scoped_guard */
}
if (!try_module_get(mod))
err = -EINVAL;
}
if (!try_module_get(mod))
err = -EINVAL;
preempt_enable();
if (skip_add)
continue;
if (err)
break;
err = add_module(&arr, mod);
@@ -124,9 +124,8 @@ static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
if (!p)
return true;
*p = '\0';
rcu_read_lock_sched();
ret = !!find_module(tk->symbol);
rcu_read_unlock_sched();
scoped_guard(rcu)
ret = !!find_module(tk->symbol);
*p = ':';
return ret;
@@ -796,12 +795,10 @@ static struct module *try_module_get_by_name(const char *name)
{
struct module *mod;
rcu_read_lock_sched();
guard(rcu)();
mod = find_module(name);
if (mod && !try_module_get(mod))
mod = NULL;
rcu_read_unlock_sched();
return mod;
}
#else
lib/bug.c
@@ -66,23 +66,19 @@ static LIST_HEAD(module_bug_list);
static struct bug_entry *module_find_bug(unsigned long bugaddr)
{
struct bug_entry *bug;
struct module *mod;
struct bug_entry *bug = NULL;
rcu_read_lock_sched();
guard(rcu)();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
unsigned i;
bug = mod->bug_table;
for (i = 0; i < mod->num_bugs; ++i, ++bug)
if (bugaddr == bug_addr(bug))
goto out;
return bug;
}
bug = NULL;
out:
rcu_read_unlock_sched();
return bug;
return NULL;
}
void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
@@ -235,11 +231,11 @@ void generic_bug_clear_once(void)
#ifdef CONFIG_MODULES
struct module *mod;
rcu_read_lock_sched();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
clear_once_table(mod->bug_table,
mod->bug_table + mod->num_bugs);
rcu_read_unlock_sched();
scoped_guard(rcu) {
list_for_each_entry_rcu(mod, &module_bug_list, bug_list)
clear_once_table(mod->bug_table,
mod->bug_table + mod->num_bugs);
}
#endif
clear_once_table(__start___bug_table, __stop___bug_table);
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
TARGET=$(basename $1)
DIR=lib/tests/module