/*
 * urcu-call-rcu-impl.h
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <syscall.h>
#include <unistd.h>
#include <sched.h>

#include "config.h"
#include "urcu/wfqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/urcu-futex.h"

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	struct cds_wfq_queue cbs;
	unsigned long flags;
	int futex;
	unsigned long qlen;	/* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static __thread struct call_rcu_data *thread_call_rcu_data;

/* Guard call_rcu thread creation. */

static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * If a given thread does not have its own call_rcu thread, this is the
 * default.
 */

static struct call_rcu_data *default_call_rcu_data;

/*
 * Futex-based wait/wakeup for call_rcu threads.  These helpers are
 * needed by both the per-CPU and the fallback configurations below,
 * so they must be defined outside of the HAVE_SCHED_GETCPU/HAVE_SYSCONF
 * conditional block.
 */

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before reading futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) == -1)
		futex_async(&crdp->futex, FUTEX_WAIT, -1,
			    NULL, NULL, 0);
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		futex_async(&crdp->futex, FUTEX_WAKE, 1,
			    NULL, NULL, 0);
	}
}

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		per_cpu_call_rcu_data = p;
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}

#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void alloc_cpu_call_rcu_data(void)
{
}

static int sched_getcpu(void)
{
	return -1;
}

#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_lock(pmp) != 0) {
		perror("pthread_mutex_lock");
		exit(-1);
	}
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	if (pthread_mutex_unlock(pmp) != 0) {
		perror("pthread_mutex_unlock");
		exit(-1);
	}
}

#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
	struct rcu_head *rhp;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp) != 0) {
		perror("sched_setaffinity");
		exit(-1);
	}

	thread_call_rcu_data = crdp;
	for (;;) {
		if (!rt) {
			uatomic_dec(&crdp->futex);
			/* Decrement futex before reading call_rcu list */
			cmm_smp_mb();
		}
		if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
			while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
				poll(NULL, 0, 1);
			_CMM_STORE_SHARED(crdp->cbs.head, NULL);
			cbs_tail = (struct cds_wfq_node **)
				uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
			synchronize_rcu();
			cbcount = 0;
			do {
				while (cbs->next == NULL &&
				       &cbs->next != cbs_tail)
					poll(NULL, 0, 1);
				if (cbs == &crdp->cbs.dummy) {
					cbs = cbs->next;
					continue;
				}
				rhp = (struct rcu_head *)cbs;
				cbs = cbs->next;
				rhp->func(rhp);
				cbcount++;
			} while (cbs != NULL);
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP) {
			if (!rt) {
				/*
				 * Read call_rcu list before write futex.
				 */
				cmm_smp_mb();
				uatomic_set(&crdp->futex, 0);
			}
			break;
		}
		if (!rt) {
			if (&crdp->cbs.head == _CMM_LOAD_SHARED(crdp->cbs.tail))
				call_rcu_wait(crdp);
		}
		poll(NULL, 0, 10);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	return NULL;
}
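
/*
 * Note on the wakeup protocol above (explanatory note, not upstream
 * documentation): a non-RT worker decrements its futex to -1 before
 * re-checking its queue, and sleeps in call_rcu_wait() only if the
 * queue is still empty.  An enqueuer that observes futex == -1 resets
 * it to 0 and issues FUTEX_WAKE, so a callback enqueued between the
 * worker's final check and its FUTEX_WAIT cannot be missed: the kernel
 * refuses to block the worker once the futex value no longer equals -1.
 */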

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified.  Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL) {
		fprintf(stderr, "Out of memory.\n");
		exit(-1);
	}
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfq_init(&crdp->cbs);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	cmm_smp_mb();	/* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
		perror("pthread_create");
		exit(-1);
	}
}

/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none.  We cannot automatically
 * create it because the platform we are running on might not define
 * sched_getcpu().
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;

	if (per_cpu_call_rcu_data == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return per_cpu_call_rcu_data[cpu];
}

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						    int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	per_cpu_call_rcu_data[cpu] = crdp;
	return 0;
}
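
/*
 * Illustrative sketch (not part of the library API): create a worker
 * pinned to CPU 0 and install it as CPU 0's call_rcu thread.  The
 * variable name "crdp0" is hypothetical.
 *
 *	struct call_rcu_data *crdp0;
 *
 *	crdp0 = create_call_rcu_data(0, 0);
 *	if (set_cpu_call_rcu_data(0, crdp0) != 0)
 *		perror("set_cpu_call_rcu_data");
 */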

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be.  Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread.  Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure.  If there is not
 * yet a default call_rcu_data structure, one will be created.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	int curcpu;
	static int warned = 0;

	if (thread_call_rcu_data != NULL)
		return thread_call_rcu_data;
	if (maxcpus <= 0)
		return get_default_call_rcu_data();
	curcpu = sched_getcpu();
	if (!warned && (curcpu < 0 || maxcpus <= curcpu)) {
		fprintf(stderr, "[error] liburcu: gcrd CPU # out of range\n");
		warned = 1;
	}
	if (curcpu >= 0 && maxcpus > curcpu &&
	    per_cpu_call_rcu_data != NULL &&
	    per_cpu_call_rcu_data[curcpu] != NULL)
		return per_cpu_call_rcu_data[curcpu];
	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return thread_call_rcu_data;
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one.  (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	thread_call_rcu_data = crdp;
}
458/*
459 * Create a separate call_rcu thread for each CPU. This does not
460 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
461 * function if you want that behavior.
462 */
463
464int create_all_cpu_call_rcu_data(unsigned long flags)
465{
466 int i;
467 struct call_rcu_data *crdp;
468 int ret;
469
470 call_rcu_lock(&call_rcu_mutex);
471 alloc_cpu_call_rcu_data();
472 call_rcu_unlock(&call_rcu_mutex);
473 if (maxcpus <= 0) {
474 errno = EINVAL;
475 return -EINVAL;
476 }
477 if (per_cpu_call_rcu_data == NULL) {
478 errno = ENOMEM;
479 return -ENOMEM;
480 }
481 for (i = 0; i < maxcpus; i++) {
482 call_rcu_lock(&call_rcu_mutex);
483 if (get_cpu_call_rcu_data(i)) {
484 call_rcu_unlock(&call_rcu_mutex);
485 continue;
486 }
c1d2c60b 487 crdp = __create_call_rcu_data(flags, i);
b57aee66
PM
488 if (crdp == NULL) {
489 call_rcu_unlock(&call_rcu_mutex);
490 errno = ENOMEM;
491 return -ENOMEM;
492 }
493 call_rcu_unlock(&call_rcu_mutex);
494 if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
495 /* FIXME: Leaks crdp for now. */
496 return ret; /* Can happen on race. */
497 }
498 }
499 return 0;
500}
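
/*
 * Illustrative sketch (not part of the library API): spawn the per-CPU
 * workers once at program startup so that the first call_rcu()
 * invocation never pays the thread-creation cost.
 *
 *	if (create_all_cpu_call_rcu_data(0) != 0)
 *		perror("create_all_cpu_call_rcu_data");
 */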

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

/*
 * Schedule a function to be invoked after a subsequent grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one.  So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first.  One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 */

void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	cds_wfq_node_init(&head->next);
	head->func = func;
	crdp = get_call_rcu_data();
	cds_wfq_enqueue(&crdp->cbs, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}
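
/*
 * Illustrative usage sketch ("struct foo" and free_foo() are
 * hypothetical application code, not part of the library): embed a
 * struct rcu_head in the structure to be reclaimed and recover the
 * enclosing object with caa_container_of() from urcu/compiler.h.
 *
 *	struct foo {
 *		int value;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 *	After removing "p" from all RCU-visible data structures:
 *
 *	call_rcu(&p->rcu, free_foo);
 */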

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread.  The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage.  For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks.  Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else.  The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers.  This simplifies
 * the calling code.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	struct cds_wfq_node *cbs;
	struct cds_wfq_node **cbs_tail;
	struct cds_wfq_node **cbs_endprev;

	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			poll(NULL, 0, 1);
	}
	if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
		/* Create default call_rcu data if need be. */
		(void) get_default_call_rcu_data();
		while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
			poll(NULL, 0, 1);
		_CMM_STORE_SHARED(crdp->cbs.head, NULL);
		cbs_tail = (struct cds_wfq_node **)
			uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
		/* Splice the leftover callbacks onto the default queue. */
		cbs_endprev = (struct cds_wfq_node **)
			uatomic_xchg(&default_call_rcu_data->cbs.tail, cbs_tail);
		*cbs_endprev = cbs;
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
	}
	cds_list_del(&crdp->list);
	free(crdp);
}

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data *crdp;

	if (maxcpus <= 0)
		return;
	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp = get_cpu_call_rcu_data(cpu);
		if (crdp == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
		call_rcu_data_free(crdp);
	}
}

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	call_rcu_lock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child.  Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec().  Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Dispose of all of the rest of the call_rcu_data structures. */
	while (call_rcu_data_list.next != call_rcu_data_list.prev) {
		crdp = cds_list_entry(call_rcu_data_list.prev,
				      struct call_rcu_data, list);
		if (crdp == default_call_rcu_data)
			crdp = cds_list_entry(crdp->list.prev,
					      struct call_rcu_data, list);
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
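
/*
 * Illustrative sketch (not part of the library API): applications that
 * fork() without exec() can register the three handlers above so that
 * the child inherits a consistent call_rcu state.
 *
 *	pthread_atfork(call_rcu_before_fork,
 *		       call_rcu_after_fork_parent,
 *		       call_rcu_after_fork_child);
 */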