ring buffer: Use cpu_dcache_is_aliasing()
diff --git a/src/lib/ringbuffer/ring_buffer_frontend.c b/src/lib/ringbuffer/ring_buffer_frontend.c
index 8a69e9ab57eec661de51040b4552daaa56b67dcf..fbf3a16837c8574ebf6b90a2986d804626ba2d97 100644
--- a/src/lib/ringbuffer/ring_buffer_frontend.c
+++ b/src/lib/ringbuffer/ring_buffer_frontend.c
  *   - put_subbuf
  */
 
+#include <linux/atomic.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/percpu-defs.h>
 #include <asm/cacheflush.h>
 
 #include <ringbuffer/config.h>
 #include <ringbuffer/frontend.h>
 #include <ringbuffer/iterator.h>
 #include <ringbuffer/nohz.h>
-#include <wrapper/atomic.h>
 #include <wrapper/cpu.h>
 #include <wrapper/kref.h>
-#include <wrapper/percpu-defs.h>
 #include <wrapper/timer.h>
 #include <wrapper/vmalloc.h>
 
@@ -652,16 +652,16 @@ static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
                raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
                break;
        case TICK_NOHZ_STOP:
-               spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               spin_lock(this_cpu_ptr(&ring_buffer_nohz_lock));
                lib_ring_buffer_stop_switch_timer(buf);
                lib_ring_buffer_stop_read_timer(buf);
-               spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               spin_unlock(this_cpu_ptr(&ring_buffer_nohz_lock));
                break;
        case TICK_NOHZ_RESTART:
-               spin_lock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               spin_lock(this_cpu_ptr(&ring_buffer_nohz_lock));
                lib_ring_buffer_start_read_timer(buf);
                lib_ring_buffer_start_switch_timer(buf);
-               spin_unlock(lttng_this_cpu_ptr(&ring_buffer_nohz_lock));
+               spin_unlock(this_cpu_ptr(&ring_buffer_nohz_lock));
                break;
        }
 
@@ -1047,7 +1047,7 @@ int lib_ring_buffer_open_read(struct lttng_kernel_ring_buffer *buf)
                atomic_long_dec(&buf->active_readers);
                return -EOVERFLOW;
        }
-       lttng_smp_mb__after_atomic();
+       smp_mb__after_atomic();
        return 0;
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
@@ -1057,7 +1057,7 @@ void lib_ring_buffer_release_read(struct lttng_kernel_ring_buffer *buf)
        struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
 
        CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-       lttng_smp_mb__before_atomic();
+       smp_mb__before_atomic();
        atomic_long_dec(&buf->active_readers);
        kref_put(&chan->ref, channel_release);
 }
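
The two hunks above replace LTTng's lttng_smp_mb__{before,after}_atomic() compatibility wrappers with the kernel's own smp_mb__before_atomic()/smp_mb__after_atomic(). As a hedged aside (a minimal sketch with hypothetical names, not code from this patch), this is the usual way those barriers pair with a non-value-returning atomic reference count:

```c
/*
 * Sketch only: hypothetical names, not code from ring_buffer_frontend.c.
 * Non-value-returning atomics such as atomic_long_inc()/atomic_long_dec()
 * imply no memory ordering on their own, so smp_mb__after_atomic() and
 * smp_mb__before_atomic() are paired with them to order the reader's
 * buffer accesses against the reference count update.
 */
#include <linux/atomic.h>

struct example_buf {
	atomic_long_t active_readers;
};

static void example_get_reader(struct example_buf *buf)
{
	atomic_long_inc(&buf->active_readers);
	/* Publish the increment before the reader touches buffer data. */
	smp_mb__after_atomic();
}

static void example_put_reader(struct example_buf *buf)
{
	/* Complete the reader's buffer accesses before dropping the count. */
	smp_mb__before_atomic();
	atomic_long_dec(&buf->active_readers);
}
```

On architectures where the atomic operation already acts as a full barrier, these helpers compile down to a compiler barrier; elsewhere they emit the full memory barrier the plain atomic lacks.
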
@@ -1210,6 +1210,16 @@ static void lib_ring_buffer_flush_read_subbuf_dcache(
        if (config->output != RING_BUFFER_MMAP)
                return;
 
+#ifdef cpu_dcache_is_aliasing
+       /*
+        * Some architectures implement flush_dcache_page() but don't
+        * actually have aliasing dcache. cpu_dcache_is_aliasing() was
+        * introduced in kernel v6.9 to query this more precisely.
+        */
+       if (!cpu_dcache_is_aliasing())
+               return;
+#endif
+
        /*
         * Architectures with caches aliased on virtual addresses may
         * use different cache lines for the linear mapping vs
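
As an aside, here is a minimal, self-contained sketch of the feature-test pattern the hunk above introduces. The helper and its name are hypothetical, and it assumes the header that defines cpu_dcache_is_aliasing() (new in kernel v6.9) is reachable through the includes the file already pulls in:

```c
/*
 * Hypothetical helper illustrating the guard above; not part of the patch.
 * Kernels that provide cpu_dcache_is_aliasing() expose it as a macro, so
 * the #ifdef doubles as a compile-time availability check: on older
 * kernels the guard compiles out and the flush stays unconditional.
 */
#include <linux/mm.h>
#include <asm/cacheflush.h>

static void example_flush_read_page(struct page *page)
{
#ifdef cpu_dcache_is_aliasing
	/* v6.9+: skip the flush when the D-cache cannot alias. */
	if (!cpu_dcache_is_aliasing())
		return;
#endif
	flush_dcache_page(page);
}
```

On kernels older than v6.9 the #ifdef block disappears entirely, so the flush behaviour is unchanged from before the patch.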