ret = PTR_ERR(tracker_pids_list_file);
goto file_error;
}
+ if (atomic_long_add_unless(&session->file->f_count,
+ 1, INT_MAX) == INT_MAX) {
+ goto refcount_error;
+ }
ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
if (ret < 0)
goto open_error;
m = tracker_pids_list_file->private_data;
m->private = session;
fd_install(file_fd, tracker_pids_list_file);
- atomic_long_inc(&session->file->f_count);
return file_fd;
open_error:
+ atomic_long_dec(&session->file->f_count);
+refcount_error:
fput(tracker_pids_list_file);
file_error:
put_unused_fd(file_fd);
* taken at start of trace.
* Yes, this is only an approximation. Yes, we can (and will) do better
* in future versions.
+ * Return 0 when the computed offset is negative. This can happen if the
+ * system sets the REALTIME clock to 0 after boot.
*/
static
uint64_t measure_clock_offset(void)
{
- uint64_t offset, monotonic[2], realtime;
+ uint64_t monotonic_avg, monotonic[2], realtime;
+ int64_t offset;
struct timespec rts = { 0, 0 };
unsigned long flags;
monotonic[1] = trace_clock_read64();
local_irq_restore(flags);
- offset = (monotonic[0] + monotonic[1]) >> 1;
+ monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
realtime += rts.tv_nsec;
- offset = realtime - offset;
+ offset = (int64_t) realtime - monotonic_avg;
+ if (offset < 0)
+ return 0;
return offset;
}