/* From perf_mmap__read(): pull the next event out of the ring buffer. */
int diff = end - *startp;

if (diff >= (int)sizeof(event->header)) {
	size = event->header.size;

	if (size < sizeof(event->header) || diff < (int)size)
		return NULL;

	/*
	 * The event straddles the end of the ring buffer and has to be
	 * stitched back together into map->event_copy.
	 */
	if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
		unsigned int offset = *startp;
		unsigned int len = min(sizeof(*event), size), cpy;
		void *dst = map->event_copy;

		cpy = min(map->mask + 1 - (offset & map->mask), len);
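The copy above is the first step of a loop that walks the wrapped event in
chunks. A minimal sketch of that loop, assuming data points at the ring-buffer
data area (one page past map->base in this layout) and len/dst are as
initialized above; the exact upstream body may differ:

	do {
		/* bytes available before the buffer edge, capped at what is left */
		cpy = min(map->mask + 1 - (offset & map->mask), len);
		memcpy(dst, &data[offset & map->mask], cpy);
		offset += cpy;
		dst += cpy;
		len -= cpy;
	} while (len);

	event = (union perf_event *)map->event_copy;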
/* From perf_mmap__read_event(): the map may already have been unmapped
 * due to a POLLHUP/POLLERR. */
if (!refcount_read(&map->refcnt))
	return NULL;

/* perf_mmap__get(): */
refcount_inc(&map->refcnt);

/* perf_mmap__put(): dropping the last reference unmaps the buffer. */
BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

if (refcount_dec_and_test(&map->refcnt))
	perf_mmap__munmap(map);
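perf_mmap__consume() ties these counts together. A plausible sketch, assuming
non-overwrite mode publishes the read position back to the kernel via
perf_mmap__write_tail() and the extra reference taken at mmap time is dropped
once the buffer drains (field names follow the prototypes listed below):

	void perf_mmap__consume(struct perf_mmap *map)
	{
		if (!map->overwrite) {
			u64 old = map->prev;

			/* tell the kernel this much has been consumed */
			perf_mmap__write_tail(map, old);
		}

		/* last reader and nothing left: drop the mmap-time reference */
		if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
			perf_mmap__put(map);
	}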
/* Default no-op __weak auxtrace hooks; arch/feature code can override them. */
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused, int fd __maybe_unused)
{
	return 0;
}
void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}
void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct perf_evlist *evlist __maybe_unused,
					  int idx __maybe_unused, bool per_cpu __maybe_unused)
{
}
/* perf_mmap__munmap(): tear down the mapping and zero the refcount. */
if (map->base != NULL) {
	/* ... */
	refcount_set(&map->refcnt, 0);
}

/* perf_mmap__mmap(): start with a refcount of 2 -- one for the evlist that
 * owns the map, one dropped by perf_mmap__consume() once the buffer empties. */
refcount_set(&map->refcnt, 2);
/* ... */
if (map->base == MAP_FAILED) {
	pr_debug2("failed to mmap perf event ring buffer, error %d\n",
		  errno);
	map->base = NULL;
	return -1;
}
/* overwrite_rb_find_range(): in overwrite mode the kernel keeps writing over
 * old data; walk the record headers forward to find a consistent end. */
struct perf_event_header *pheader;
u64 evt_head = *start;
int size = mask + 1;

pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
pheader = (struct perf_event_header *)(buf + (*start & mask));

while (true) {
	if (evt_head - *start >= (unsigned int)size) {
		pr_debug("Finished reading overwrite ring buffer: rewind\n");
		if (evt_head - *start > (unsigned int)size)
			evt_head -= pheader->size;
		*end = evt_head;
		return 0;
	}

	pheader = (struct perf_event_header *)(buf + (evt_head & mask));

	if (pheader->size == 0) {
		pr_debug("Finished reading overwrite ring buffer: get start\n");
		*end = evt_head;
		return 0;
	}

	evt_head += pheader->size;
	pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
}

WARN_ONCE(1, "Shouldn't get here\n");
return -1;
/* __perf_mmap__read_init(): more than a full buffer's worth of new data
 * means the writer has lapped the reader. */
if (size > (unsigned long)(md->mask) + 1) {
	if (!md->overwrite) {
		WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
/* perf_mmap__read_init(): the map may already be gone (POLLHUP/POLLERR). */
if (!refcount_read(&map->refcnt))
	return -ENOENT;

/* perf_mmap__push(): drain the readable window through a caller-supplied callback. */
int perf_mmap__push(struct perf_mmap *md, void *to,
		    int push(void *to, void *buf, size_t size))
{
	/* ... */
	rc = perf_mmap__read_init(md);
	if (rc < 0)
		/* a failed read_init is fatal unless it just said "try again" */
		return (rc == -EAGAIN) ? 0 : -1;
	/* ... */
	/* first push: the chunk up to the buffer edge, when the data wraps */
	if (push(to, buf, size) < 0) {
	/* ... */
	/* second push: the remainder from the start of the buffer */
	if (push(to, buf, size) < 0) {
	/* ... */

/* perf_mmap__read_done(): */
if (!refcount_read(&map->refcnt))
	return;
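Taken together, these helpers support the canonical consumer loop. A hedged
usage sketch, with error handling elided and the per-event handler name
hypothetical:

	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;

	while ((event = perf_mmap__read_event(map)) != NULL) {
		deliver(event);		/* hypothetical per-event handler */
		perf_mmap__consume(map);
	}

	perf_mmap__read_done(map);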
/* Prototypes and definitions referenced above: */
void perf_mmap__consume(struct perf_mmap *map)
void perf_mmap__put(struct perf_mmap *map)
void perf_mmap__get(struct perf_mmap *map)
#define pr_debug2(fmt,...)
void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused, off_t auxtrace_offset __maybe_unused, unsigned int auxtrace_pages __maybe_unused, bool auxtrace_overwrite __maybe_unused)
void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused, struct perf_evlist *evlist __maybe_unused, int idx __maybe_unused, bool per_cpu __maybe_unused)
static void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
int perf_mmap__read_init(struct perf_mmap *map)
#define pr_debug(fmt,...)
void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd)
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused, struct auxtrace_mmap_params *mp __maybe_unused, void *userpg __maybe_unused, int fd __maybe_unused)
static int __perf_mmap__read_init(struct perf_mmap *md)
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
void perf_mmap__read_done(struct perf_mmap *map)
void perf_mmap__munmap(struct perf_mmap *map)
static union perf_event *perf_mmap__read(struct perf_mmap *map, u64 *startp, u64 end)
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
struct auxtrace_mmap_params auxtrace_mp
static bool perf_mmap__empty(struct perf_mmap *map)
size_t perf_mmap__mmap_len(struct perf_mmap *map)
static u64 perf_mmap__read_head(struct perf_mmap *mm)
int perf_mmap__push(struct perf_mmap *md, void *to, int push(void *to, void *buf, size_t size))
#define pr_debug3(fmt,...)
struct auxtrace_mmap auxtrace_mmap
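Two of the smallest helpers in the list are worth spelling out. Plausible
sketches, assuming the usual layout of one control page followed by mask+1
bytes of data, with any auxtrace area tracked separately:

	size_t perf_mmap__mmap_len(struct perf_mmap *map)
	{
		return map->mask + 1 + page_size;
	}

	static bool perf_mmap__empty(struct perf_mmap *map)
	{
		return perf_mmap__read_head(map) == map->prev &&
		       !map->auxtrace_mmap.base;
	}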