From: Michael Jeanson
Date: Mon, 13 Sep 2021 16:00:38 +0000 (-0400)
Subject: fix: cpu/hotplug: Remove deprecated CPU-hotplug functions. (v5.15)
X-Git-Tag: v2.12.7~9
X-Git-Url: http://git.liburcu.org/?p=lttng-modules.git;a=commitdiff_plain;h=4a6ea683a440dc224dc4a9628ff4f713c42f5922

fix: cpu/hotplug: Remove deprecated CPU-hotplug functions. (v5.15)

The CPU-hotplug functions get|put_online_cpus() were deprecated in v4.13
and removed in v5.15.

See upstream commits :

commit 8c854303ce0e38e5bbedd725ff39da7e235865d8
Author: Sebastian Andrzej Siewior
Date:   Tue Aug 3 16:16:21 2021 +0200

    cpu/hotplug: Remove deprecated CPU-hotplug functions.

    No users in tree use the deprecated CPU-hotplug functions anymore.

    Remove them.

Introduced in v4.13 :

commit 8f553c498e1772cccb39a114da4a498d22992758
Author: Thomas Gleixner
Date:   Wed May 24 10:15:12 2017 +0200

    cpu/hotplug: Provide cpus_read|write_[un]lock()

    The counting 'rwsem' hackery of get|put_online_cpus() is going to be
    replaced by percpu rwsem.

    Rename the functions to make it clear that it's locking and not some
    refcount style interface. These new functions will be used for the
    preparatory patches which make the code ready for the percpu rwsem
    conversion.

    Rename all instances in the cpu hotplug code while at it.

Change-Id: I5a37cf5afc075a402b7347989fac637dfa60a1ed
Signed-off-by: Michael Jeanson
Signed-off-by: Mathieu Desnoyers
---
diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index a49bb79b..94bdc107 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -12,10 +12,10 @@
 #include
 #include
 #include
-#include
 #include
 #include
 
+#include
 #include
 #include	/* for wrapper_vmalloc_sync_mappings() */
 #include
@@ -445,14 +445,14 @@ int channel_backend_init(struct channel_backend *chanb,
 	chanb->cpu_hp_notifier.priority = 5;
 	register_hotcpu_notifier(&chanb->cpu_hp_notifier);
 
-	get_online_cpus();
+	lttng_cpus_read_lock();
 	for_each_online_cpu(i) {
 		ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
 					 chanb, i);
 		if (ret)
 			goto free_bufs;	/* cpu hotplug locked */
 	}
-	put_online_cpus();
+	lttng_cpus_read_unlock();
 #else
 	for_each_possible_cpu(i) {
 		ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
@@ -485,7 +485,7 @@ free_bufs:
 	 */
 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 #ifdef CONFIG_HOTPLUG_CPU
-	put_online_cpus();
+	lttng_cpus_read_unlock();
 	unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
 #endif
 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 9121088f..7993633c 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -48,6 +48,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -708,7 +709,7 @@ static void channel_unregister_notifiers(struct channel *chan)
 	int cpu;
 
 #ifdef CONFIG_HOTPLUG_CPU
-	get_online_cpus();
+	lttng_cpus_read_lock();
 	chan->cpu_hp_enable = 0;
 	for_each_online_cpu(cpu) {
 		struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
@@ -716,7 +717,7 @@ static void channel_unregister_notifiers(struct channel *chan)
 		lib_ring_buffer_stop_switch_timer(buf);
 		lib_ring_buffer_stop_read_timer(buf);
 	}
-	put_online_cpus();
+	lttng_cpus_read_unlock();
 	unregister_cpu_notifier(&chan->cpu_hp_notifier);
 #else
 	for_each_possible_cpu(cpu) {
@@ -756,14 +757,14 @@ void lib_ring_buffer_set_quiescent_channel(struct channel *chan)
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		for_each_channel_cpu(cpu, chan) {
 			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
 							      cpu);
 
 			lib_ring_buffer_set_quiescent(buf);
 		}
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 	} else {
 		struct lib_ring_buffer *buf = chan->backend.buf;
 
@@ -778,14 +779,14 @@ void lib_ring_buffer_clear_quiescent_channel(struct channel *chan)
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		for_each_channel_cpu(cpu, chan) {
 			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
 							      cpu);
 
 			lib_ring_buffer_clear_quiescent(buf);
 		}
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 	} else {
 		struct lib_ring_buffer *buf = chan->backend.buf;
 
@@ -882,7 +883,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 		chan->cpu_hp_notifier.priority = 6;
 		register_cpu_notifier(&chan->cpu_hp_notifier);
 
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		for_each_online_cpu(cpu) {
 			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
 							       cpu);
@@ -892,7 +893,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 			spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
 		}
 		chan->cpu_hp_enable = 1;
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 #else
 		for_each_possible_cpu(cpu) {
 			struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c
index ccfef7dd..671249ae 100644
--- a/lib/ringbuffer/ring_buffer_iterator.c
+++ b/lib/ringbuffer/ring_buffer_iterator.c
@@ -10,6 +10,7 @@
  */
 
 #include
+#include
 #include
 #include
 #include
@@ -422,13 +423,13 @@ int channel_iterator_init(struct channel *chan)
 		chan->hp_iter_notifier.priority = 10;
 		register_cpu_notifier(&chan->hp_iter_notifier);
 
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		for_each_online_cpu(cpu) {
 			buf = per_cpu_ptr(chan->backend.buf, cpu);
 			lib_ring_buffer_iterator_init(chan, buf);
 		}
 		chan->hp_iter_enable = 1;
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 #else
 		for_each_possible_cpu(cpu) {
 			buf = per_cpu_ptr(chan->backend.buf, cpu);
@@ -501,7 +502,7 @@ int channel_iterator_open(struct channel *chan)
 	CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		/* Allow CPU hotplug to keep track of opened reader */
 		chan->iter.read_open = 1;
 		for_each_channel_cpu(cpu, chan) {
@@ -511,7 +512,7 @@ int channel_iterator_open(struct channel *chan)
 				goto error;
 			buf->iter.read_open = 1;
 		}
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 	} else {
 		buf = channel_get_ring_buffer(config, chan, 0);
 		ret = lib_ring_buffer_iterator_open(buf);
@@ -520,7 +521,7 @@ int channel_iterator_open(struct channel *chan)
 error:
 	/* Error should always happen on CPU 0, hence no close is required. */
 	CHAN_WARN_ON(chan, cpu != 0);
-	put_online_cpus();
+	lttng_cpus_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(channel_iterator_open);
@@ -532,7 +533,7 @@ void channel_iterator_release(struct channel *chan)
 	int cpu;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		for_each_channel_cpu(cpu, chan) {
 			buf = channel_get_ring_buffer(config, chan, cpu);
 			if (buf->iter.read_open) {
@@ -541,7 +542,7 @@ void channel_iterator_release(struct channel *chan)
 			}
 		}
 		chan->iter.read_open = 0;
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 	} else {
 		buf = channel_get_ring_buffer(config, chan, 0);
 		lib_ring_buffer_iterator_release(buf);
diff --git a/lttng-context-perf-counters.c b/lttng-context-perf-counters.c
index e12c73bb..58fbbe61 100644
--- a/lttng-context-perf-counters.c
+++ b/lttng-context-perf-counters.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -95,10 +96,10 @@ void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
 	{
 		int cpu;
 
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		for_each_online_cpu(cpu)
 			perf_event_release_kernel(events[cpu]);
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 #ifdef CONFIG_HOTPLUG_CPU
 		unregister_cpu_notifier(&field->u.perf_counter->nb);
 #endif
@@ -288,7 +289,7 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 		perf_field->nb.priority = 0;
 		register_cpu_notifier(&perf_field->nb);
 #endif
-		get_online_cpus();
+		lttng_cpus_read_lock();
 		for_each_online_cpu(cpu) {
 			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
 						cpu, NULL, overflow_callback);
@@ -301,7 +302,7 @@ int lttng_add_perf_counter_to_ctx(uint32_t type,
 				goto counter_busy;
 			}
 		}
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 		perf_field->hp_enable = 1;
 	}
 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
@@ -344,7 +345,7 @@ counter_error:
 		if (events[cpu] && !IS_ERR(events[cpu]))
 			perf_event_release_kernel(events[cpu]);
 	}
-	put_online_cpus();
+	lttng_cpus_read_unlock();
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&perf_field->nb);
 #endif
diff --git a/lttng-statedump-impl.c b/lttng-statedump-impl.c
index 22d9cc61..6853a180 100644
--- a/lttng-statedump-impl.c
+++ b/lttng-statedump-impl.c
@@ -23,7 +23,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -34,6 +33,7 @@
 #include
 #include
 
+#include
 #include
 #include
 #include
@@ -760,7 +760,7 @@ int do_lttng_statedump(struct lttng_session *session)
 	 * is to guarantee that each CPU has been in a state where is was in
 	 * syscall mode (i.e. not in a trap, an IRQ or a soft IRQ).
 	 */
-	get_online_cpus();
+	lttng_cpus_read_lock();
 	atomic_set(&kernel_threads_to_run, num_online_cpus());
 	for_each_online_cpu(cpu) {
 		INIT_DELAYED_WORK(&cpu_work[cpu], lttng_statedump_work_func);
@@ -768,7 +768,7 @@ int do_lttng_statedump(struct lttng_session *session)
 	}
 	/* Wait for all threads to run */
 	__wait_event(statedump_wq, (atomic_read(&kernel_threads_to_run) == 0));
-	put_online_cpus();
+	lttng_cpus_read_unlock();
 	/* Our work is done */
 	trace_lttng_statedump_end(session);
 	return 0;
diff --git a/wrapper/cpu.h b/wrapper/cpu.h
new file mode 100644
index 00000000..f0cfcdf6
--- /dev/null
+++ b/wrapper/cpu.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * wrapper/cpu.h
+ *
+ * Copyright (C) 2021 Michael Jeanson
+ */
+
+#ifndef _LTTNG_WRAPPER_CPU_H
+#define _LTTNG_WRAPPER_CPU_H
+
+#include
+#include
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0))
+
+static inline
+void lttng_cpus_read_lock(void)
+{
+	cpus_read_lock();
+}
+
+static inline
+void lttng_cpus_read_unlock(void)
+{
+	cpus_read_unlock();
+}
+
+#else /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0) */
+
+static inline
+void lttng_cpus_read_lock(void)
+{
+	get_online_cpus();
+}
+
+static inline
+void lttng_cpus_read_unlock(void)
+{
+	put_online_cpus();
+}
+
+#endif /* LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,13,0) */
+
+#endif /* _LTTNG_WRAPPER_CPU_H */
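
Every call site converted above follows the same pattern: take the CPU-hotplug
read lock through the wrapper, walk the online CPUs, then drop the lock. The
sketch below illustrates that caller-side pattern; it is not part of the patch,
the lttng_example_count_online_cpus() helper is invented for illustration, and
it assumes the two bare #include lines of wrapper/cpu.h pull in <linux/cpu.h>
and the LTTng kernel-version header that provides LTTNG_KERNEL_VERSION().

    /* Hypothetical usage sketch, not part of this commit. */
    #include <linux/cpumask.h>

    #include <wrapper/cpu.h>

    static int lttng_example_count_online_cpus(void)
    {
            int cpu, count = 0;

            lttng_cpus_read_lock();         /* cpus_read_lock() on >= 4.13, get_online_cpus() before */
            for_each_online_cpu(cpu)
                    count++;                /* CPUs cannot be hot-unplugged while the read lock is held */
            lttng_cpus_read_unlock();       /* cpus_read_unlock() on >= 4.13, put_online_cpus() before */

            return count;
    }

Keeping the kernel-version check inside wrapper/cpu.h means the call sites above
need no LTTNG_KERNEL_VERSION() conditionals of their own; they compile unchanged
against kernels before the v4.13 rename and after the v5.15 removal.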