#undef TRACE_SYSTEM
#define TRACE_SYSTEM workqueue

#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_WORKQUEUE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>
#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))

#ifndef _TRACE_WORKQUEUE_DEF_
#define _TRACE_WORKQUEUE_DEF_

struct worker;
struct global_cwq;

/*
 * These definitions have to be duplicated (extracts only) from
 * kernel/workqueue.c, where they are private.
 */

enum {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */
#endif
	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
};

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
struct worker_pool {
	struct global_cwq	*gcwq;		/* I: the owning gcwq */
	unsigned int		flags;		/* X: flags */

	struct list_head	worklist;	/* L: list of pending works */
	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	struct list_head	idle_list;	/* X: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	struct mutex		assoc_mutex;	/* protect GCWQ_DISASSOCIATED */
#else
	struct mutex		manager_mutex;	/* mutex manager should hold */
#endif
	struct ida		worker_ida;	/* L: for worker IDs */
};

struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	/* workers are chained either in busy_hash or pool idle_list */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
	struct worker_pool	pools[NR_WORKER_POOLS];
						/* normal and highpri pools */
#else
	struct worker_pool	pools[2];	/* normal and highpri pools */

	wait_queue_head_t	rebind_hold;	/* rebind hold wait */
#endif
} ____cacheline_aligned_in_smp;

#else

struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	struct list_head	worklist;	/* L: list of pending works */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* X: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
	struct worker		*first_idle;	/* L: first idle worker */
} ____cacheline_aligned_in_smp;

#endif

struct cpu_workqueue_struct {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	struct worker_pool	*pool;		/* I: the associated pool */
#else
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
#endif
	struct workqueue_struct	*wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};

#endif

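/*
 * A minimal sketch of how the duplicated layout is consumed (illustrative
 * only): the queue_work probe below recovers the target CPU by chasing
 * pointers through these private structures, roughly
 *
 *	unsigned int cpu;
 * #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
 *	cpu = cwq->pool->gcwq->cpu;
 * #else
 *	cpu = cwq->gcwq->cpu;
 * #endif
 *
 * which is why any field reordering in kernel/workqueue.c must be
 * mirrored here, extract for extract.
 */
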
DECLARE_EVENT_CLASS(workqueue_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
	),

	TP_fast_assign(
		tp_assign(work, work)
	),

	TP_printk("work struct %p", __entry->work)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/**
 * workqueue_queue_work - called when a work gets queued
 * @req_cpu: the requested cpu
 * @cwq: pointer to struct cpu_workqueue_struct
 * @work: pointer to struct work_struct
 *
 * This event occurs when a work is queued immediately, or once a
 * delayed work is actually queued on a workqueue (i.e. once the delay
 * has been reached).
 */
TRACE_EVENT(workqueue_queue_work,

	TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq,
		 struct work_struct *work),

	TP_ARGS(req_cpu, cwq, work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
		__field( void *, workqueue)
		__field( unsigned int, req_cpu )
		__field( unsigned int, cpu )
	),

	TP_fast_assign(
		tp_assign(work, work)
		tp_assign(function, work->func)
		tp_assign(workqueue, cwq->wq)
		tp_assign(req_cpu, req_cpu)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		tp_assign(cpu, cwq->pool->gcwq->cpu)
#else
		tp_assign(cpu, cwq->gcwq->cpu)
#endif
	),

	TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
		  __entry->work, __entry->function, __entry->workqueue,
		  __entry->req_cpu, __entry->cpu)
)

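/*
 * Call-site sketch (an assumption, modeled on __queue_work() in
 * kernel/workqueue.c of this era; not part of this header): the kernel
 * fires this tracepoint once the target cwq is known, roughly as
 *
 *	trace_workqueue_queue_work(req_cpu, cwq, work);
 *	insert_work(cwq, work, worklist, work_flags);
 *
 * so a delayed work shows up here only when its timer expires and the
 * work is really handed to a workqueue.
 */
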
/**
 * workqueue_activate_work - called when a work gets activated
 * @work: pointer to struct work_struct
 *
 * This event occurs when a queued work is put on the active queue,
 * which happens immediately after queueing unless the @max_active limit
 * is reached.
 */
DEFINE_EVENT(workqueue_work, workqueue_activate_work,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
)
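
/*
 * Activation sketch (an assumption, based on __queue_work() in
 * kernel/workqueue.c): queue_work and activate_work normally fire back
 * to back, and only the @max_active limit separates them:
 *
 *	if (likely(cwq->nr_active < cwq->max_active)) {
 *		trace_workqueue_activate_work(work);
 *		cwq->nr_active++;
 *	} else {
 *		work_flags |= WORK_STRUCT_DELAYED;
 *		worklist = &cwq->delayed_works;
 *	}
 */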
#endif

/**
 * workqueue_execute_start - called immediately before the workqueue callback
 * @work: pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
TRACE_EVENT(workqueue_execute_start,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
		__field( void *, function)
	),

	TP_fast_assign(
		tp_assign(work, work)
		tp_assign(function, work->func)
	),

	TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
)

/**
 * workqueue_execute_end - called immediately after the workqueue callback
 * @work: pointer to struct work_struct
 *
 * Allows tracking of workqueue execution.
 */
DEFINE_EVENT(workqueue_work, workqueue_execute_end,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work)
)
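
/*
 * Pairing sketch (an assumption, modeled on process_one_work() in
 * kernel/workqueue.c): the worker brackets the callback with the two
 * events above, so execute_end minus execute_start on the same work
 * pointer gives the callback latency:
 *
 *	trace_workqueue_execute_start(work);
 *	f(work);			// f is the cached work->func
 *	trace_workqueue_execute_end(work);
 */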

#else

DECLARE_EVENT_CLASS(workqueue,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work),

	TP_STRUCT__entry(
		__array(char, thread_comm, TASK_COMM_LEN)
		__field(pid_t, thread_pid)
		__field(work_func_t, func)
	),

	TP_fast_assign(
		tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
		tp_assign(thread_pid, wq_thread->pid)
		tp_assign(func, work->func)
	),

	TP_printk("thread=%s:%d func=%pf", __entry->thread_comm,
		__entry->thread_pid, __entry->func)
)

DEFINE_EVENT(workqueue, workqueue_insertion,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work)
)

DEFINE_EVENT(workqueue, workqueue_execution,

	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),

	TP_ARGS(wq_thread, work)
)

/* Trace the creation of one workqueue thread on a cpu */
TRACE_EVENT(workqueue_creation,

	TP_PROTO(struct task_struct *wq_thread, int cpu),

	TP_ARGS(wq_thread, cpu),

	TP_STRUCT__entry(
		__array(char, thread_comm, TASK_COMM_LEN)
		__field(pid_t, thread_pid)
		__field(int, cpu)
	),

	TP_fast_assign(
		tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
		tp_assign(thread_pid, wq_thread->pid)
		tp_assign(cpu, cpu)
	),

	TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
		__entry->thread_pid, __entry->cpu)
)

TRACE_EVENT(workqueue_destruction,

	TP_PROTO(struct task_struct *wq_thread),

	TP_ARGS(wq_thread),

	TP_STRUCT__entry(
		__array(char, thread_comm, TASK_COMM_LEN)
		__field(pid_t, thread_pid)
	),

	TP_fast_assign(
		tp_memcpy(thread_comm, wq_thread->comm, TASK_COMM_LEN)
		tp_assign(thread_pid, wq_thread->pid)
	),

	TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
)
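
/*
 * Lifecycle sketch (an assumption, based on the pre-2.6.36
 * kernel/workqueue.c thread lifecycle): creation fires when the per-cpu
 * worker thread is spawned and destruction when it is torn down, roughly
 *
 *	trace_workqueue_creation(cwq->thread, cpu);
 *	...
 *	trace_workqueue_destruction(cwq->thread);
 */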

#endif

#endif /* _TRACE_WORKQUEUE_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"