/* Number of records lost due to per-CPU trace buffer being full. */
static DEFINE_PER_CPU(unsigned long, lost_records);
+static DEFINE_PER_CPU(unsigned long, lost_records_first_tsc);
/* a flag recording whether initialization has been done */
/* or more properly, if the tbuf subsystem is enabled right now */
NULL);
}
-#define LOST_REC_SIZE 8
+#define LOST_REC_SIZE (4 + 8 + 16) /* header + tsc + sizeof(struct ed) */
static inline int insert_lost_records(struct t_buf *buf)
{
+ /* Payload of a TRC_LOST_RECORDS event: how many records were dropped
+ * on this cpu, which domain/vcpu was current, and the tsc captured when
+ * the first record was lost. Must be packed so the layout matches what
+ * consumers of the trace stream expect (and LOST_REC_SIZE's accounting
+ * of 4-byte header + 8-byte tsc + 16-byte payload). */
struct {
u32 lost_records;
- } ed;
+ u32 did:16, vid:16;
+ u64 first_tsc;
+ } __attribute__((packed)) ed;
+ ed.vid = current->vcpu_id;
+ ed.did = current->domain->domain_id;
ed.lost_records = this_cpu(lost_records);
+ /* NOTE(review): lost_records_first_tsc is deliberately not cleared here;
+ * resetting lost_records to 0 below means the "== 1" guard at the loss
+ * site overwrites first_tsc the next time a first record is dropped. */
+ ed.first_tsc = this_cpu(lost_records_first_tsc);
this_cpu(lost_records) = 0;
return __insert_record(buf,
TRC_LOST_RECORDS,
sizeof(ed),
- 0 /* !cycles */,
+ 1 /* cycles */,
LOST_REC_SIZE,
(unsigned char *)&ed);
}
/* Do we have enough space for everything? */
if ( total_size > bytes_to_tail )
{
- this_cpu(lost_records)++;
+ if ( ++this_cpu(lost_records) == 1 )
+ this_cpu(lost_records_first_tsc)=(u64)get_cycles();
local_irq_restore(flags);
return;
}