vfparch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
if ( vfparch < 2 )
- panic("Xen only support VFP 3");
+ panic("Xen only support VFP 3\n");
return 0;
}
local_irq_disable();
show_execution_state(regs);
- panic("bad mode");
+ panic("bad mode\n");
}
/*
res = guest_physmap_add_page(d, _gfn(mfn_x(smfn)), smfn, order);
if ( res )
- panic("Failed map pages to DOM0: %d", res);
+ panic("Failed map pages to DOM0: %d\n", res);
kinfo->unassigned_mem -= size;
/* Failed to allocate bank0 under 4GB */
if ( is_32bit_domain(d) )
- panic("Unable to allocate first memory bank.");
+ panic("Unable to allocate first memory bank\n");
/* Try to allocate memory from above 4GB */
printk(XENLOG_INFO "No bank has been allocated below 4GB.\n");
*/
if ((addrcells != 1 && addrcells != 2) ||
(sizecells != 1 && sizecells != 2))
- panic("Cannot cope with this size");
+ panic("Cannot cope with this size\n");
/* See linux Documentation/devicetree/bindings/arm/xen.txt */
res = fdt_begin_node(fdt, "hypervisor");
if ( res )
{
panic(XENLOG_ERR "Unable to map ACPI region 0x%"PRIx64
- " - 0x%"PRIx64" in domain \n",
+ " - 0x%"PRIx64" in domain\n",
addr & PAGE_MASK, PAGE_ALIGN(addr + size) - 1);
}
}
fdt_totalsize(kinfo->fdt));
if ( left != 0 )
- panic("Unable to copy the DTB to dom0 memory (left = %lu bytes)", left);
+ panic("Unable to copy the DTB to dom0 memory (left = %lu bytes)\n", left);
xfree(kinfo->fdt);
}
/* Fix up linux,initrd-start and linux,initrd-end in /chosen */
node = fdt_path_offset(kinfo->fdt, "/chosen");
if ( node < 0 )
- panic("Cannot find the /chosen node");
+ panic("Cannot find the /chosen node\n");
cellp = (__be32 *)val;
dt_set_cell(&cellp, ARRAY_SIZE(val), load_addr);
res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-start",
val, sizeof(val));
if ( res )
- panic("Cannot fix up \"linux,initrd-start\" property");
+ panic("Cannot fix up \"linux,initrd-start\" property\n");
cellp = (__be32 *)val;
dt_set_cell(&cellp, ARRAY_SIZE(val), load_addr + len);
res = fdt_setprop_inplace(kinfo->fdt, node, "linux,initrd-end",
val, sizeof(val));
if ( res )
- panic("Cannot fix up \"linux,initrd-end\" property");
+ panic("Cannot fix up \"linux,initrd-end\" property\n");
initrd = ioremap_wc(paddr, len);
if ( !initrd )
- panic("Unable to map the hwdom initrd");
+ panic("Unable to map the hwdom initrd\n");
res = copy_to_guest_phys_flush_dcache(kinfo->d, load_addr,
initrd, len);
if ( res != 0 )
- panic("Unable to copy the initrd in the hwdom memory");
+ panic("Unable to copy the initrd in the hwdom memory\n");
}
/*
base = ioremap_nocache(addr, size);
if ( !base )
- panic("GICv2: Cannot remap v2m register frame");
+ panic("GICv2: Cannot remap v2m register frame\n");
msi_typer = readl_relaxed(base + V2M_MSI_TYPER);
spi_start = V2M_MSI_TYPER_BASE_SPI(msi_typer);
/* Allocate an entry to record new v2m frame information. */
v2m_data = xzalloc_bytes(sizeof(struct v2m_data));
if ( !v2m_data )
- panic("GICv2: Cannot allocate memory for v2m frame");
+ panic("GICv2: Cannot allocate memory for v2m frame\n");
INIT_LIST_HEAD(&v2m_data->entry);
v2m_data->addr = addr;
/* Get register frame resource from DT. */
if ( dt_device_get_address(v2m, 0, &addr, &size) )
- panic("GICv2: Cannot find a valid v2m frame address");
+ panic("GICv2: Cannot find a valid v2m frame address\n");
/*
* Check whether DT uses msi-base-spi and msi-num-spis properties to
res = dt_device_get_address(node, 0, &dbase, NULL);
if ( res )
- panic("GICv2: Cannot find a valid address for the distributor");
+ panic("GICv2: Cannot find a valid address for the distributor\n");
res = dt_device_get_address(node, 1, &cbase, &csize);
if ( res )
- panic("GICv2: Cannot find a valid address for the CPU");
+ panic("GICv2: Cannot find a valid address for the CPU\n");
res = dt_device_get_address(node, 2, &hbase, NULL);
if ( res )
- panic("GICv2: Cannot find a valid address for the hypervisor");
+ panic("GICv2: Cannot find a valid address for the hypervisor\n");
res = dt_device_get_address(node, 3, &vbase, &vsize);
if ( res )
- panic("GICv2: Cannot find a valid address for the virtual CPU");
+ panic("GICv2: Cannot find a valid address for the virtual CPU\n");
res = platform_get_irq(node, 0);
if ( res < 0 )
- panic("GICv2: Cannot find the maintenance IRQ");
+ panic("GICv2: Cannot find the maintenance IRQ\n");
gicv2_info.maintenance_irq = res;
/* TODO: Add check on distributor */
{
const char *msg = acpi_format_exception(status);
- panic("GICv2: Failed to get MADT table, %s", msg);
+ panic("GICv2: Failed to get MADT table, %s\n", msg);
}
/* Collect CPU base addresses */
gic_acpi_parse_madt_cpu, table,
ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
if ( count <= 0 )
- panic("GICv2: No valid GICC entries exists");
+ panic("GICv2: No valid GICC entries exists\n");
/*
* Find distributor base address. We expect one distributor entry since
gic_acpi_parse_madt_distributor, table,
ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
if ( count <= 0 )
- panic("GICv2: No valid GICD entries exists");
+ panic("GICv2: No valid GICD entries exists\n");
}
#else
static void __init gicv2_acpi_init(void) { }
if ( (dbase & ~PAGE_MASK) || (cbase & ~PAGE_MASK) ||
(hbase & ~PAGE_MASK) || (vbase & ~PAGE_MASK) )
- panic("GICv2 interfaces not page aligned");
+ panic("GICv2 interfaces not page aligned\n");
gicv2.map_dbase = ioremap_nocache(dbase, PAGE_SIZE);
if ( !gicv2.map_dbase )
its_data = xzalloc(struct host_its);
if ( !its_data )
- panic("GICv3: Cannot allocate memory for ITS frame");
+ panic("GICv3: Cannot allocate memory for ITS frame\n");
its_data->addr = addr;
its_data->size = size;
continue;
if ( dt_device_get_address(its, 0, &addr, &size) )
- panic("GICv3: Cannot find a valid ITS frame address");
+ panic("GICv3: Cannot find a valid ITS frame address\n");
add_to_host_its_list(addr, size, its);
}
static void __init gicv3_ioremap_distributor(paddr_t dist_paddr)
{
if ( dist_paddr & ~PAGE_MASK )
- panic("GICv3: Found unaligned distributor address %"PRIpaddr"",
+ panic("GICv3: Found unaligned distributor address %"PRIpaddr"\n",
dbase);
gicv3.map_dbase = ioremap_nocache(dist_paddr, SZ_64K);
res = dt_device_get_address(node, 0, &dbase, NULL);
if ( res )
- panic("GICv3: Cannot find a valid distributor address");
+ panic("GICv3: Cannot find a valid distributor address\n");
gicv3_ioremap_distributor(dbase);
res = platform_get_irq(node, 0);
if ( res < 0 )
- panic("GICv3: Cannot find the maintenance IRQ");
+ panic("GICv3: Cannot find the maintenance IRQ\n");
gicv3_info.maintenance_irq = res;
/*
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
gic_acpi_parse_madt_distributor, 0);
if ( count <= 0 )
- panic("GICv3: No valid GICD entries exists");
+ panic("GICv3: No valid GICD entries exists\n");
gicv3_ioremap_distributor(dbase);
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_get_madt_cpu_num, 0);
if (count <= 0)
- panic("GICv3: No valid GICR entries exists");
+ panic("GICv3: No valid GICR entries exists\n");
gicr_table = false;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_parse_cpu_redistributor, count);
if ( count <= 0 )
- panic("GICv3: Can't get Redistributor entry");
+ panic("GICv3: Can't get Redistributor entry\n");
/* Collect CPU base addresses */
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
gic_acpi_parse_madt_cpu, 0);
if ( count <= 0 )
- panic("GICv3: No valid GICC entries exists");
+ panic("GICv3: No valid GICC entries exists\n");
gicv3.rdist_stride = 0;
}
}
if ( !num_gics )
- panic("Unable to find compatible GIC in the device tree");
+ panic("Unable to find compatible GIC in the device tree\n");
/* Set the GIC as the primary interrupt controller */
dt_interrupt_controller = node;
header = acpi_table_get_entry_madt(ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
if ( !header )
- panic("No valid GICD entries exists");
+ panic("No valid GICD entries exists\n");
dist = container_of(header, struct acpi_madt_generic_distributor, header);
if ( acpi_device_init(DEVICE_GIC, NULL, dist->version) )
- panic("Unable to find compatible GIC in the ACPI table");
+ panic("Unable to find compatible GIC in the ACPI table\n");
}
#else
static void __init gic_acpi_preinit(void) { }
void __init gic_init(void)
{
if ( gic_hw_ops->init() )
- panic("Failed to initialize the GIC drivers");
+ panic("Failed to initialize the GIC drivers\n");
/* Clear LR mask for cpu0 */
clear_cpu_lr_mask();
}
smp_call_function_interrupt();
break;
default:
- panic("Unhandled SGI %d on CPU%d", sgi, smp_processor_id());
+ panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id());
break;
}
paddr_t modbase;
if ( modsize + kernsize > ramsize )
- panic("Not enough memory in the first bank for the kernel+dtb+initrd");
+ panic("Not enough memory in the first bank for the kernel+dtb+initrd\n");
/*
* DTB must be loaded such that it does not conflict with the
modbase = kernbase - modsize;
else
{
- panic("Unable to find suitable location for dtb+initrd");
+ panic("Unable to find suitable location for dtb+initrd\n");
return;
}
kernel = ioremap_wc(paddr, len);
if ( !kernel )
- panic("Unable to map the hwdom kernel");
+ panic("Unable to map the hwdom kernel\n");
rc = copy_to_guest_phys_flush_dcache(info->d, load_addr,
kernel, len);
if ( rc != 0 )
- panic("Unable to copy the kernel in the hwdom memory");
+ panic("Unable to copy the kernel in the hwdom memory\n");
iounmap(kernel);
}
}
if ( base_mfn < mfn_x(xenheap_mfn_start) )
- panic("cannot add xenheap mapping at %lx below heap start %lx",
+ panic("cannot add xenheap mapping at %lx below heap start %lx\n",
base_mfn, mfn_x(xenheap_mfn_start));
end_mfn = base_mfn + nr_mfns;
vmid_mask = xzalloc_array(unsigned long, BITS_TO_LONGS(MAX_VMID));
if ( !vmid_mask )
- panic("Could not allocate VMID bitmap space");
+ panic("Could not allocate VMID bitmap space\n");
set_bit(INVALID_VMID, vmid_mask);
}
res = platform->init();
if ( res )
- panic("Unable to initialize the platform");
+ panic("Unable to initialize the platform\n");
}
int __init platform_init_time(void)
node = dt_find_interrupt_controller(xgene_dt_int_ctrl_match);
if ( !node )
- panic("%s: Can not find interrupt controller node", __func__);
+ panic("%s: Can not find interrupt controller node\n", __func__);
res = dt_device_get_address(node, 0, &dbase, NULL);
if ( !dbase )
- panic("%s: Cannot find a valid address for the distributor", __func__);
+ panic("%s: Cannot find a valid address for the distributor\n", __func__);
/*
* In old X-Gene Storm firmware and DT, secure mode addresses have
*/
if ( dbase == XGENE_SEC_GICV2_DIST_ADDR )
panic("OLD X-Gene Firmware is not supported by Xen.\n"
- "Please upgrade your firmware to the latest version");
+ "Please upgrade your firmware to the latest version\n");
}
static uint32_t xgene_storm_quirks(void)
}
if ( !paddr )
- panic("Not enough memory to relocate Xen");
+ panic("Not enough memory to relocate Xen\n");
printk("Placing Xen at 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
paddr, paddr + min_size);
void *fdt;
if ( !bootinfo.mem.nr_banks )
- panic("No memory bank");
+ panic("No memory bank\n");
init_pdx();
} while ( !opt_xenheap_megabytes && xenheap_pages > 32<<(20-PAGE_SHIFT) );
if ( ! e )
- panic("Not not enough space for xenheap");
+ panic("Not not enough space for xenheap\n");
domheap_pages = heap_pages - xenheap_pages;
if ( !device_tree_flattened )
panic("Invalid device tree blob at physical address %#lx.\n"
"The DTB must be 8-byte aligned and must not exceed 2 MB in size.\n\n"
- "Please check your bootloader.",
+ "Please check your bootloader.\n",
fdt_paddr);
fdt_size = boot_fdt_info(device_tree_flattened, fdt_paddr);
dom0 = domain_create(0, &dom0_cfg, true);
if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) )
- panic("Error creating domain 0");
+ panic("Error creating domain 0\n");
if ( construct_dom0(dom0) != 0)
- panic("Could not set up DOM0 guest OS");
+ panic("Could not set up DOM0 guest OS\n");
heap_init_late();
{
if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) ||
!zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
- panic("No memory for CPU sibling/core maps");
+ panic("No memory for CPU sibling/core maps\n");
/* A CPU is a sibling with itself and is always on its own core. */
cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
timer = dt_find_matching_node(NULL, timer_ids);
if ( !timer )
- panic("Unable to find a compatible timer in the device tree");
+ panic("Unable to find a compatible timer in the device tree\n");
dt_device_set_used_by(timer, DOMID_XEN);
res = platform_init_time();
if ( res )
- panic("Timer: Cannot initialize platform timer");
+ panic("Timer: Cannot initialize platform timer\n");
boot_count = READ_SYSREG64(CNTPCT_EL0);
}
res = platform_get_irq(timer, i);
if ( res < 0 )
- panic("Timer: Unable to retrieve IRQ %u from the device tree", i);
+ panic("Timer: Unable to retrieve IRQ %u from the device tree\n", i);
timer_irq[i] = res;
}
}
/* Check that this CPU supports the Generic Timer interface */
if ( !cpu_has_gentimer )
- panic("CPU does not support the Generic Timer v1 interface");
+ panic("CPU does not support the Generic Timer v1 interface\n");
printk("Generic Timer IRQ: phys=%u hyp=%u virt=%u Freq: %lu KHz\n",
timer_irq[TIMER_PHYS_NONSECURE_PPI],
second_in_first ? " during second stage lookup" : "",
fsc_level_str(level));
- panic("Error during Hypervisor-to-physical address translation");
+ panic("Error during Hypervisor-to-physical address translation\n");
}
static void cpsr_switch_mode(struct cpu_user_regs *regs, int mode)
return 0;
show_execution_state(regs);
- panic("Xen BUG at %s%s:%d", prefix, filename, lineno);
+ panic("Xen BUG at %s%s:%d\n", prefix, filename, lineno);
case BUGFRAME_assert:
/* ASSERT: decode the predicate string pointer. */
if ( debugger_trap_fatal(TRAP_invalid_op, regs) )
return 0;
show_execution_state(regs);
- panic("Assertion '%s' failed at %s%s:%d",
+ panic("Assertion '%s' failed at %s%s:%d\n",
predicate, prefix, filename, lineno);
}
show_execution_state(regs);
break;
default:
- panic("DOM%d: Unhandled debug trap %#x", domid, code);
+ panic("DOM%d: Unhandled debug trap %#x\n", domid, code);
break;
}
}
const struct rdist_region *regions,
unsigned int intid_bits)
{
- panic("New VGIC implementation does not yet support GICv3.");
+ panic("New VGIC implementation does not yet support GICv3\n");
}
#endif
microcode_resume_cpu(0);
if ( !recheck_cpu_features(0) )
- panic("Missing previously available feature(s).");
+ panic("Missing previously available feature(s)\n");
/* Re-enabled default NMI/#MC use of MSR_SPEC_CTRL. */
ci->spec_ctrl_flags |= (default_spec_ctrl_flags & SCF_ist_wrmsr);
mdelay(1);
if ( !ACCESS_ONCE(alt_done) )
- panic("Timed out waiting for alternatives self-NMI to hit");
+ panic("Timed out waiting for alternatives self-NMI to hit\n");
set_nmi_callback(saved_nmi_callback);
}
printk("Not enabling x2APIC: depends on iommu_supports_eim.\n");
return;
}
- panic("x2APIC: already enabled by BIOS, but "
- "iommu_supports_eim failed");
+ panic("x2APIC: already enabled by BIOS, but iommu_supports_eim failed\n");
}
if ( (ioapic_entries = alloc_ioapic_entries()) == NULL )
default:
if ( x2apic_enabled )
panic("Interrupt remapping could not be enabled while "
- "x2APIC is already enabled by BIOS");
+ "x2APIC is already enabled by BIOS\n");
printk(XENLOG_ERR
"Failed to enable Interrupt Remapping: Will not enable x2APIC.\n");
" The processor has reported a hardware error which cannot\n"
" be recovered from. Xen will now reboot the machine.\n");
mc_panic_dump();
- panic("HARDWARE ERROR");
+ panic("HARDWARE ERROR\n");
}
/*
dprintk(XENLOG_ERR, "MCE delayed action failed\n");
is_mc_panic = true;
x86_mcinfo_dump(mctelem_dataptr(mctc));
- panic("MCE: Software recovery failed for the UCR");
+ panic("MCE: Software recovery failed for the UCR\n");
break;
case MCER_RECOVERED:
unsigned long rc;
if ( hypervisor_alloc_unused_page(&mfn) )
- panic("unable to reserve shared info memory page");
+ panic("unable to reserve shared info memory page\n");
xatp.gpfn = mfn_x(mfn);
rc = xen_hypercall_memory_op(XENMEM_add_to_physmap, &xatp);
if ( rc )
- panic("failed to map shared_info page: %ld", rc);
+ panic("failed to map shared_info page: %ld\n", rc);
set_fixmap(FIX_XEN_SHARED_INFO, mfn_x(mfn) << PAGE_SHIFT);
mem = rangeset_new(NULL, "host memory map", 0);
if ( !mem )
- panic("failed to allocate PFN usage rangeset");
+ panic("failed to allocate PFN usage rangeset\n");
/*
* Mark up to the last memory page (or 4GiB) as RAM. This is done because
*/
if ( rangeset_add_range(mem, 0, max_t(unsigned long, max_page - 1,
PFN_DOWN(GB(4) - 1))) )
- panic("unable to add RAM to in-use PFN rangeset");
+ panic("unable to add RAM to in-use PFN rangeset\n");
for ( i = 0; i < e820.nr_map; i++ )
{
if ( rangeset_add_range(mem, PFN_DOWN(e->addr),
PFN_UP(e->addr + e->size - 1)) )
- panic("unable to add range [%#lx, %#lx] to in-use PFN rangeset",
+ panic("unable to add range [%#lx, %#lx] to in-use PFN rangeset\n",
PFN_DOWN(e->addr), PFN_UP(e->addr + e->size - 1));
}
}
rc = xen_hypercall_set_evtchn_upcall_vector(this_cpu(vcpu_id),
evtchn_upcall_vector);
if ( rc )
- panic("Unable to set evtchn upcall vector: %d", rc);
+ panic("Unable to set evtchn upcall vector: %d\n", rc);
/* Trick toolstack to think we are enlightened */
{
if ( !e820_change_range_type(e820, pfn << PAGE_SHIFT,
(pfn << PAGE_SHIFT) + PAGE_SIZE,
E820_RESERVED, E820_RAM) )
- panic("Unable to add/change memory type of pfn %#lx to RAM", pfn);
+ panic("Unable to add/change memory type of pfn %#lx to RAM\n", pfn);
}
void __init hypervisor_fixup_e820(struct e820map *e820)
#define MARK_PARAM_RAM(p) ({ \
rc = xen_hypercall_hvm_get_param(p, &pfn); \
if ( rc ) \
- panic("Unable to get " #p); \
+ panic("Unable to get " #p "\n"); \
mark_pfn_as_ram(e820, pfn); \
ASSERT(i < ARRAY_SIZE(reserved_pages)); \
reserved_pages[i++] = pfn << PAGE_SHIFT; \
*/
bitmap_zero(vcpu_info_mapped, NR_CPUS);
if ( map_vcpuinfo() && nr_cpu_ids > XEN_LEGACY_MAX_VCPUS )
- panic("unable to remap vCPU info and vCPUs > legacy limit");
+ panic("unable to remap vCPU info and vCPUs > legacy limit\n");
/* Setup event channel upcall vector. */
init_evtchn();
*/
d->arch.e820 = xzalloc_array(struct e820entry, e820.nr_map + 1);
if ( !d->arch.e820 )
- panic("Unable to allocate memory for Dom0 e820 map");
+ panic("Unable to allocate memory for Dom0 e820 map\n");
entry_guest = d->arch.e820;
/* Clamp e820 memory map to match the memory assigned to Dom0 */
/* Guest already enabled an interrupt window. */
return;
default:
- panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x",
+ panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x\n",
__func__, rc);
}
}
if (entry->pin != -1) {
if (irq_2_pin_free_entry >= PIN_MAP_SIZE)
- panic("io_apic.c: whoops");
+ panic("io_apic.c: whoops\n");
entry->next = irq_2_pin_free_entry;
entry = irq_2_pin + entry->next;
irq_2_pin_free_entry = entry->next;
if (!physid_isset(i, phys_id_present_map))
break;
if (i >= get_physical_broadcast())
- panic("Max APIC ID exceeded");
+ panic("Max APIC ID exceeded\n");
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
i);
mp_ioapics[apic].mpc_apicid = i;
}
printk(" failed :(.\n");
panic("IO-APIC + timer doesn't work! Boot with apic_verbosity=debug "
- "and send a report. Then try booting with the 'noapic' option");
+ "and send a report. Then try booting with the 'noapic' option\n");
}
/*
}
if (i == get_physical_broadcast())
- panic("Max apic_id exceeded");
+ panic("Max apic_id exceeded\n");
printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
"trying %d\n", ioapic, apic_id, i);
l->unlock_level = __get_lock_level();
}
else if ( (unlikely(!rec)) )
- panic("mm lock already held by %s", l->locker_function);
+ panic("mm lock already held by %s\n", l->locker_function);
__set_lock_level(level);
}
if (nr_ioapics >= MAX_IO_APICS) {
printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS");
+ panic("Recompile kernel with bigger MAX_IO_APICS\n");
}
if (!m->mpc_apicaddr) {
printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded");
+ panic("Max # of irq sources exceeded\n");
}
static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
if (nr_ioapics >= MAX_IO_APICS) {
printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
"(found %d)\n", MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS");
+ panic("Recompile kernel with bigger MAX_IO_APICS\n");
}
if (!address) {
printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
mp_irqs[mp_irq_entries] = intsrc;
if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded");
+ panic("Max # of irq sources exceeded\n");
return;
}
mp_irqs[mp_irq_entries] = intsrc;
if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded");
+ panic("Max # of irq sources exceeded\n");
}
}
!(node_start_pfn(node) >> (32 - PAGE_SHIFT)) )
break;
if ( node >= MAX_NUMNODES )
- panic("No node with memory below 4Gb");
+ panic("No node with memory below 4Gb\n");
/*
* Try to not reserve the whole node's memory for DMA, but dividing
l1_pgentry_t *pl1e = NULL;
if ( v_start <= vphysmap_end && vphysmap_start <= v_end )
- panic("DOM0 P->M table overlaps initial mapping");
+ panic("DOM0 P->M table overlaps initial mapping\n");
while ( vphysmap_start < vphysmap_end )
{
if ( d->tot_pages + ((round_pgup(vphysmap_end) - vphysmap_start)
>> PAGE_SHIFT) + 3 > nr_pages )
- panic("Dom0 allocation too small for initial P->M table");
+ panic("Dom0 allocation too small for initial P->M table\n");
if ( pl1e )
{
vphysmap_start &= PAGE_MASK;
}
if ( !page )
- panic("Not enough RAM for DOM0 P->M table");
+ panic("Not enough RAM for DOM0 P->M table\n");
if ( pl1e )
unmap_domain_page(pl1e);
value = (parms.virt_hv_start_low + mask) & ~mask;
BUG_ON(!is_pv_32bit_domain(d));
if ( value > __HYPERVISOR_COMPAT_VIRT_START )
- panic("Domain 0 expects too high a hypervisor start address");
+ panic("Domain 0 expects too high a hypervisor start address\n");
HYPERVISOR_COMPAT_VIRT_START(d) =
max_t(unsigned int, m2p_compat_vstart, value);
}
count -= PAGE_ALIGN(initrd_len);
order = get_order_from_bytes(count);
if ( (1UL << order) + PFN_UP(initrd_len) > nr_pages )
- panic("Domain 0 allocation is too small for kernel image");
+ panic("Domain 0 allocation is too small for kernel image\n");
if ( parms.p2m_base != UNSET_ADDR )
{
}
page = alloc_domheap_pages(d, order, 0);
if ( page == NULL )
- panic("Not enough RAM for domain 0 allocation");
+ panic("Not enough RAM for domain 0 allocation\n");
alloc_spfn = mfn_x(page_to_mfn(page));
alloc_epfn = alloc_spfn + d->tot_pages;
order = get_order_from_pages(count);
page = alloc_domheap_pages(d, order, 0);
if ( !page )
- panic("Not enough RAM for domain 0 initrd");
+ panic("Not enough RAM for domain 0 initrd\n");
for ( count = -count; order--; )
if ( count & (1UL << order) )
{
{
page = alloc_domheap_page(d, MEMF_no_owner);
if ( !page )
- panic("Not enough RAM for domain 0 PML4");
+ panic("Not enough RAM for domain 0 PML4\n");
page->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
l4start = l4tab = page_to_virt(page);
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
while ( pfn < nr_pages )
{
if ( (page = alloc_chunk(d, nr_pages - d->tot_pages)) == NULL )
- panic("Not enough RAM for DOM0 reservation");
+ panic("Not enough RAM for DOM0 reservation\n");
while ( pfn < d->tot_pages )
{
mfn = mfn_x(page_to_mfn(page));
pv_destroy_gdt(v);
if ( test_bit(XENFEAT_supervisor_mode_kernel, parms.f_required) )
- panic("Dom0 requires supervisor-mode execution");
+ panic("Dom0 requires supervisor-mode execution\n");
rc = dom0_setup_permissions(d);
BUG_ON(rc != 0);
}
if ( total_pages - avail > shim_nrpages )
- panic("pages used by shim > shim_nrpages (%#lx > %#lx)",
+ panic("pages used by shim > shim_nrpages (%#lx > %#lx)\n",
total_pages - avail, shim_nrpages);
shim_nrpages -= total_pages - avail;
/* Check that we have at least one Multiboot module. */
if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
- panic("dom0 kernel not specified. Check bootloader configuration.");
+ panic("dom0 kernel not specified. Check bootloader configuration\n");
if ( pvh_boot )
{
e820_raw.nr_map = 2;
}
else
- panic("Bootloader provided no memory information.");
+ panic("Bootloader provided no memory information\n");
/* Sanitise the raw E820 map to produce a final clean version. */
max_page = raw_max_page = init_e820(memmap_type, &e820_raw);
for ( i = 0; !efi_enabled(EFI_LOADER) && i < mbi->mods_count; i++ )
{
if ( mod[i].mod_start & (PAGE_SIZE - 1) )
- panic("Bootloader didn't honor module alignment request.");
+ panic("Bootloader didn't honor module alignment request\n");
mod[i].mod_end -= mod[i].mod_start;
mod[i].mod_start >>= PAGE_SHIFT;
mod[i].reserved = 0;
}
if ( modules_headroom && !mod->reserved )
- panic("Not enough memory to relocate the dom0 kernel image.");
+ panic("Not enough memory to relocate the dom0 kernel image\n");
for ( i = 0; i < mbi->mods_count; ++i )
{
uint64_t s = (uint64_t)mod[i].mod_start << PAGE_SHIFT;
}
if ( !xen_phys_start )
- panic("Not enough memory to relocate Xen.");
+ panic("Not enough memory to relocate Xen\n");
/* This needs to remain in sync with xen_in_range(). */
reserve_e820_ram(&boot_e820, __pa(_stext), __pa(__2M_rwdata_end));
watchdog_setup();
if ( !tboot_protect_mem_regions() )
- panic("Could not protect TXT memory regions");
+ panic("Could not protect TXT memory regions\n");
init_guest_cpuid();
init_guest_msr_policy();
/* Create initial domain 0. */
dom0 = domain_create(get_initial_domain_id(), &dom0_cfg, !pv_shim);
if ( IS_ERR(dom0) || (alloc_dom0_vcpu0(dom0) == NULL) )
- panic("Error creating domain 0");
+ panic("Error creating domain 0\n");
/* Grab the DOM0 command line. */
cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
if ( construct_dom0(dom0, mod, modules_headroom,
(initrdidx > 0) && (initrdidx < mbi->mods_count)
? mod + initrdidx : NULL, cmdline) != 0)
- panic("Could not set up DOM0 guest OS");
+ panic("Could not set up DOM0 guest OS\n");
if ( cpu_has_smap )
{
socket_cpumask = xzalloc_array(cpumask_t *, nr_sockets);
if ( socket_cpumask == NULL ||
(socket_cpumask[cpu_to_socket(0)] = xzalloc(cpumask_t)) == NULL )
- panic("No memory for socket CPU siblings map");
+ panic("No memory for socket CPU siblings map\n");
if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
!zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)) )
- panic("No memory for boot CPU sibling/core maps");
+ panic("No memory for boot CPU sibling/core maps\n");
set_cpu_sibling_map(0);
printk("MAC for %s before S3 is: 0x%08"PRIx64"\n", what, orig_mac);
printk("MAC for %s after S3 is: 0x%08"PRIx64"\n", what, resume_mac);
- panic("Memory integrity was lost on resume (%d)", error);
+ panic("Memory integrity was lost on resume (%d)\n", error);
}
int tboot_wake_ap(int apicid, unsigned long sipi_vec)
}
if ( rc <= 0 )
- panic("Unable to find usable platform timer");
+ panic("Unable to find usable platform timer\n");
printk("Platform timer is %s %s\n",
freq_string(pts->frequency), pts->name);
cmos_rtc_probe = false;
else if ( system_state < SYS_STATE_smp_boot && !cmos_rtc_probe )
panic("System with no CMOS RTC advertised must be booted from EFI"
- " (or with command line option \"cmos-rtc-probe\")");
+ " (or with command line option \"cmos-rtc-probe\")\n");
for ( ; ; )
{
}
if ( unlikely(cmos_rtc_probe) )
- panic("No CMOS RTC found - system must be booted from EFI");
+ panic("No CMOS RTC found - system must be booted from EFI\n");
return mktime(rtc.year, rtc.mon, rtc.day, rtc.hour, rtc.min, rtc.sec);
}
}
panic("FATAL TRAP: vector = %d (%s)\n"
- "[error_code=%04x] %s",
+ "[error_code=%04x] %s\n",
trapnr, trapstr(trapnr), regs->error_code,
(regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
}
return;
show_execution_state(regs);
- panic("FATAL RESERVED TRAP %#x: %s", trapnr, trapstr(trapnr));
+ panic("FATAL RESERVED TRAP %#x: %s\n", trapnr, trapstr(trapnr));
}
static void do_trap(struct cpu_user_regs *regs)
show_execution_state(regs);
panic("FATAL TRAP: vector = %d (%s)\n"
- "[error_code=%04x]",
+ "[error_code=%04x]\n",
trapnr, trapstr(trapnr), regs->error_code);
}
return;
show_execution_state(regs);
- panic("Xen BUG at %s%s:%d", prefix, filename, lineno);
+ panic("Xen BUG at %s%s:%d\n", prefix, filename, lineno);
case BUGFRAME_assert:
/* ASSERT: decode the predicate string pointer. */
return;
show_execution_state(regs);
- panic("Assertion '%s' failed at %s%s:%d",
+ panic("Assertion '%s' failed at %s%s:%d\n",
predicate, prefix, filename, lineno);
}
return;
show_execution_state(regs);
- panic("FATAL TRAP: vector = %d (invalid opcode)", TRAP_invalid_op);
+ panic("FATAL TRAP: vector = %d (invalid opcode)\n", TRAP_invalid_op);
}
void do_int3(struct cpu_user_regs *regs)
show_page_walk(addr);
panic("FATAL PAGE FAULT\n"
"[error_code=%04x]\n"
- "Faulting linear address: %p",
+ "Faulting linear address: %p\n",
error_code, _p(addr));
}
return;
show_execution_state(regs);
- panic("GENERAL PROTECTION FAULT\n[error_code=%04x]", regs->error_code);
+ panic("GENERAL PROTECTION FAULT\n[error_code=%04x]\n", regs->error_code);
}
static void pci_serr_softirq(void)
return;
nomem:
- panic("Not enough memory for m2p table");
+ panic("Not enough memory for m2p table\n");
}
void __init zap_low_mappings(void)
show_code(regs);
show_stack_overflow(cpu, regs);
- panic("DOUBLE FAULT -- system shutdown");
+ panic("DOUBLE FAULT -- system shutdown\n");
}
static unsigned int write_stub_trampoline(
if ( domid == 0 || domid == hardware_domid )
{
if ( hardware_domid < 0 || hardware_domid >= DOMID_FIRST_RESERVED )
- panic("The value of hardware_dom must be a valid domain ID");
+ panic("The value of hardware_dom must be a valid domain ID\n");
d->is_pinned = opt_dom0_vcpus_pin;
d->disable_migrate = 1;
static __init void error(char *x)
{
- panic("%s", x);
+ panic("%s\n", x);
}
static __init int fill_inbuf(void)
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
if ( SCHED_OP(&ops, init) )
- panic("scheduler returned error on init");
+ panic("scheduler returned error on init\n");
if ( sched_ratelimit_us &&
(sched_ratelimit_us > XEN_SYSCTL_SCHED_RATELIMIT_MAX
ubsan_prologue(&data->location, &flags);
pr_err("calling __builtin_unreachable()\n");
ubsan_epilogue(&flags);
- panic("can't return from __builtin_unreachable()");
+ panic("can't return from __builtin_unreachable()\n");
}
EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
void __init warning_add(const char *warning)
{
if ( nr_warnings >= WARNING_ARRAY_SIZE )
- panic("Too many pieces of warning text.");
+ panic("Too many pieces of warning text\n");
warnings[nr_warnings] = warning;
nr_warnings++;
console_start_sync();
printk("\n****************************************\n");
printk("Panic on CPU %d:\n", smp_processor_id());
- printk("%s\n", buf);
+ printk("%s", buf);
printk("****************************************\n\n");
if ( opt_noreboot )
printk("Manual reset required ('noreboot' specified)\n");
arch_iommu_check_autotranslated_hwdom(d);
if ( iommu_passthrough )
- panic("Dom0 uses paging translated mode, dom0-passthrough must not be "
- "enabled\n");
+ panic("Dom0 uses paging translated mode, dom0-passthrough must not be enabled\n");
iommu_dom0_strict = 1;
}
if ( (force_iommu && !iommu_enabled) ||
(force_intremap && !iommu_intremap) )
- panic("Couldn't enable %s and iommu=required/force",
+ panic("Couldn't enable %s and iommu=required/force\n",
!iommu_enabled ? "IOMMU" : "Interrupt Remapping");
if ( !iommu_intremap )
{
radix_tree_init(&pci_segments);
if ( !alloc_pseg(0) )
- panic("Could not initialize PCI segment 0");
+ panic("Could not initialize PCI segment 0\n");
}
int __init pci_add_segment(u16 seg)
if ( !kexecing ) \
{ \
dump_execution_state(); \
- panic("DMAR hardware malfunction"); \
+ panic("DMAR hardware malfunction\n"); \
} \
break; \
} \
if ( !is_igd_vt_enabled_quirk() )
{
if ( force_iommu )
- panic("BIOS did not enable IGD for VT properly, crash Xen for security purpose");
+ panic("BIOS did not enable IGD for VT properly, crash Xen for security purpose\n");
printk(XENLOG_WARNING VTDPREFIX
"BIOS did not enable IGD for VT properly. Disabling IGD VT-d engine.\n");
return;
if ( init_vtd_hw() != 0 && force_iommu )
- panic("IOMMU setup failed, crash Xen for security purpose");
+ panic("IOMMU setup failed, crash Xen for security purpose\n");
for_each_drhd_unit ( drhd )
{
avc_init();
if ( register_xsm(&flask_ops) )
- panic("Flask: Unable to register with XSM");
+ panic("Flask: Unable to register with XSM\n");
if ( policy_size && flask_bootparam != FLASK_BOOTPARAM_LATELOAD )
ret = security_load_policy(policy_buffer, policy_size);
if ( ret && flask_bootparam == FLASK_BOOTPARAM_ENFORCING )
- panic("Unable to load FLASK policy");
+ panic("Unable to load FLASK policy\n");
if ( ret )
printk(XENLOG_INFO "Flask: Access controls disabled until policy is loaded.\n");