Update to Linux 2.6.11.
Signed-off-by: Christian Limpach <chris@xensource.com>
3e5a4e683HKVU-sxtagrDasRB8eBVw linux-2.4.29-xen-sparse/mm/swapfile.c
41180721bNns9Na7w1nJ0ZVt8bhUNA linux-2.4.29-xen-sparse/mm/vmalloc.c
41505c57WAd5l1rlfCLNSCpx9J13vA linux-2.4.29-xen-sparse/net/core/skbuff.c
- 40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.10-xen-sparse/arch/xen/Kconfig
- 40f56237utH41NPukqHksuNf29IC9A linux-2.6.10-xen-sparse/arch/xen/Kconfig.drivers
- 40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.10-xen-sparse/arch/xen/Makefile
- 40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.10-xen-sparse/arch/xen/boot/Makefile
- 40f56237hRxbacU_3PdoAl6DjZ3Jnw linux-2.6.10-xen-sparse/arch/xen/configs/xen0_defconfig
- 40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.10-xen-sparse/arch/xen/configs/xenU_defconfig
- 40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.10-xen-sparse/arch/xen/i386/Kconfig
- 40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.10-xen-sparse/arch/xen/i386/Makefile
- 40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.10-xen-sparse/arch/xen/i386/kernel/Makefile
- 40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.10-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
- 40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.10-xen-sparse/arch/xen/i386/kernel/cpu/common.c
- 41ab440bnpxZdWShZrGgM9pPaz5rmA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile
- 41ab440bBKWz-aEOEojU4PAMXe3Ppg linux-2.6.10-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c
- 40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.10-xen-sparse/arch/xen/i386/kernel/entry.S
- 40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.10-xen-sparse/arch/xen/i386/kernel/head.S
- 40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
- 40faa751_zbZlAmLyQgCXdYekVFdWA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/ioport.c
+41d00d82zN8IfLBRxc7G_i7lbwT3cQ linux-2.6.10-xen-sparse/arch/xen/i386/kernel/irq.c
- 40f56238ue3YRsK52HG7iccNzP1AwQ linux-2.6.10-xen-sparse/arch/xen/i386/kernel/ldt.c
- 41d54a76YMCA67S8J-TBT3J62Wx6yA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/microcode.c
- 4107adf1cNtsuOxOB4T6paAoY2R2PA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/pci-dma.c
- 40f56238a8iOVDEoostsbun_sy2i4g linux-2.6.10-xen-sparse/arch/xen/i386/kernel/process.c
- 40f56238YQIJoYG2ehDGEcdTgLmGbg linux-2.6.10-xen-sparse/arch/xen/i386/kernel/setup.c
- 40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.10-xen-sparse/arch/xen/i386/kernel/signal.c
+41811cac4lkCB-fHir6CcxuEJ2pGsQ linux-2.6.10-xen-sparse/arch/xen/i386/kernel/smp.c
+41811ca9mbGpqBrZVrUGEiv8CTV3ng linux-2.6.10-xen-sparse/arch/xen/i386/kernel/smpboot.c
- 40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/time.c
- 40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.10-xen-sparse/arch/xen/i386/kernel/timers/Makefile
- 40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.10-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
- 40f562389xNa78YBZciUibQjyRU_Lg linux-2.6.10-xen-sparse/arch/xen/i386/kernel/traps.c
- 40f56238JypKAUG01ZojFwH7qnZ5uA linux-2.6.10-xen-sparse/arch/xen/i386/kernel/vsyscall.S
- 40f56238wi6AdNQjm0RT57bSkwb6hg linux-2.6.10-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
- 40f56238a3w6-byOzexIlMgni76Lcg linux-2.6.10-xen-sparse/arch/xen/i386/mm/Makefile
- 40f56238ILx8xlbywNbzTdv5Zr4xXQ linux-2.6.10-xen-sparse/arch/xen/i386/mm/fault.c
- 4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6.10-xen-sparse/arch/xen/i386/mm/highmem.c
- 40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c
- 40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c
- 41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.10-xen-sparse/arch/xen/i386/mm/ioremap.c
- 413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.10-xen-sparse/arch/xen/i386/mm/pageattr.c
- 40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c
- 4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.10-xen-sparse/arch/xen/i386/pci/Makefile
- 4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.10-xen-sparse/arch/xen/i386/pci/direct.c
- 4107adf1s5u6249DNPUViX1YNagbUQ linux-2.6.10-xen-sparse/arch/xen/i386/pci/irq.c
- 40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.10-xen-sparse/arch/xen/kernel/Makefile
- 40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.10-xen-sparse/arch/xen/kernel/ctrl_if.c
- 41ab6fa06JdF7jxUsuDcjN3UhuIAxg linux-2.6.10-xen-sparse/arch/xen/kernel/devmem.c
- 40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.10-xen-sparse/arch/xen/kernel/evtchn.c
- 4110f478aeQWllIN7J4kouAHiAqrPw linux-2.6.10-xen-sparse/arch/xen/kernel/fixup.c
- 412dfae9eA3_6e6bCGUtg1mj8b56fQ linux-2.6.10-xen-sparse/arch/xen/kernel/gnttab.c
- 40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.10-xen-sparse/arch/xen/kernel/reboot.c
- 414c113396tK1HTVeUalm3u-1DF16g linux-2.6.10-xen-sparse/arch/xen/kernel/skbuff.c
+418f90e4lGdeJK9rmbOB1kN-IKSjsQ linux-2.6.10-xen-sparse/arch/xen/kernel/smp.c
- 3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.10-xen-sparse/arch/xen/kernel/xen_proc.c
- 41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.10-xen-sparse/drivers/Makefile
- 4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.10-xen-sparse/drivers/char/mem.c
- 4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.10-xen-sparse/drivers/char/tty_io.c
- 40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.10-xen-sparse/drivers/xen/Makefile
- 41768fbcncpBQf8s2l2-CwoSNIZ9uA linux-2.6.10-xen-sparse/drivers/xen/balloon/Makefile
- 3e6377f8i5e9eGz7Pw6fQuhuTQ7DQg linux-2.6.10-xen-sparse/drivers/xen/balloon/balloon.c
- 410d0893otFGghmv4dUXDUBBdY5aIA linux-2.6.10-xen-sparse/drivers/xen/blkback/Makefile
- 4087cf0d1XgMkooTZAiJS6NrcpLQNQ linux-2.6.10-xen-sparse/drivers/xen/blkback/blkback.c
- 4087cf0dZadZ8r6CEt4fNN350Yle3A linux-2.6.10-xen-sparse/drivers/xen/blkback/common.h
- 4087cf0dxlh29iw0w-9rxOCEGCjPcw linux-2.6.10-xen-sparse/drivers/xen/blkback/control.c
- 4087cf0dbuoH20fMjNZjcgrRK-1msQ linux-2.6.10-xen-sparse/drivers/xen/blkback/interface.c
- 4087cf0dk97tacDzxfByWV7JifUYqA linux-2.6.10-xen-sparse/drivers/xen/blkback/vbd.c
- 40f56239Sfle6wGv5FS0wjS_HI150A linux-2.6.10-xen-sparse/drivers/xen/blkfront/Kconfig
- 40f562395atl9x4suKGhPkjqLOXESg linux-2.6.10-xen-sparse/drivers/xen/blkfront/Makefile
- 40f56239-JNIaTzlviVJohVdoYOUpw linux-2.6.10-xen-sparse/drivers/xen/blkfront/blkfront.c
- 40f56239y9naBTXe40Pi2J_z3p-d1g linux-2.6.10-xen-sparse/drivers/xen/blkfront/block.h
- 40f56239BVfPsXBiWQitXgDRtOsiqg linux-2.6.10-xen-sparse/drivers/xen/blkfront/vbd.c
+41a226e0vjAcDXHOnXE5ummcdUD2mg linux-2.6.10-xen-sparse/drivers/xen/blktap/Makefile
+41a226e0VeZA1N8tbU6nvJ3OxUcJmw linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap.c
+41a226e1k4J5VMLnrYXDWRqElS49YQ linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap.h
+41a226e1-A_Hy7utS8vJKaXnH_tzfA linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap_controlmsg.c
+41a226e19NoUUTOvs7jumDMRYDIO4Q linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap_datapath.c
+41a226e1MNSyWWK5dEVgvSQ5OW0fDA linux-2.6.10-xen-sparse/drivers/xen/blktap/blktap_userdev.c
- 40f56239fsLjvtD8YBRAWphps4FDjg linux-2.6.10-xen-sparse/drivers/xen/console/Makefile
- 3e5a4e651TH-SXHoufurnWjgl5bfOA linux-2.6.10-xen-sparse/drivers/xen/console/console.c
- 40f56239KYxO0YabhPzCTeUuln-lnA linux-2.6.10-xen-sparse/drivers/xen/evtchn/Makefile
- 40f56239DoibTX6R-ZYd3QTXAB8_TA linux-2.6.10-xen-sparse/drivers/xen/evtchn/evtchn.c
- 410a9817HEVJvred5Oy_uKH3HFJC5Q linux-2.6.10-xen-sparse/drivers/xen/netback/Makefile
- 4097ba831lpGeLlPg-bfV8XarVVuoQ linux-2.6.10-xen-sparse/drivers/xen/netback/common.h
- 4097ba83wvv8yi5P5xugCUBAdb6O-A linux-2.6.10-xen-sparse/drivers/xen/netback/control.c
- 4097ba83byY5bTSugJGZ1exTxIcMKw linux-2.6.10-xen-sparse/drivers/xen/netback/interface.c
- 4087cf0dGmSbFhFZyIZBJzvqxY-qBw linux-2.6.10-xen-sparse/drivers/xen/netback/netback.c
- 40f56239lrg_Ob0BJ8WBFS1zeg2CYw linux-2.6.10-xen-sparse/drivers/xen/netfront/Kconfig
- 40f56239Wd4k_ycG_mFsSO1r5xKdtQ linux-2.6.10-xen-sparse/drivers/xen/netfront/Makefile
- 405853f6nbeazrNyEWNHBuoSg2PiPA linux-2.6.10-xen-sparse/drivers/xen/netfront/netfront.c
- 4108f5c1ppFXVpQzCOAZ6xXYubsjKA linux-2.6.10-xen-sparse/drivers/xen/privcmd/Makefile
- 3e5a4e65IUfzzMu2kZFlGEB8-rpTaA linux-2.6.10-xen-sparse/drivers/xen/privcmd/privcmd.c
+41ee5e8bYDQkjRVKnFn5uFyy0KreCw linux-2.6.10-xen-sparse/drivers/xen/usbback/common.h
+41ee5e8bt7xeBUJqG5XJS-ofukdsgA linux-2.6.10-xen-sparse/drivers/xen/usbback/control.c
+41ee5e8bSs3BGC7yegM_ek2Tn0Ahvw linux-2.6.10-xen-sparse/drivers/xen/usbback/interface.c
+41ee5e8bglvqKvZSY5uJ5JGQejEwyQ linux-2.6.10-xen-sparse/drivers/xen/usbback/usbback.c
+41ee5e8ckZ9xVNvu9NHIZDK7JqApmQ linux-2.6.10-xen-sparse/drivers/xen/usbfront/usbfront.c
+41ee5e8ck9scpGirfqEZRARbGDyTXA linux-2.6.10-xen-sparse/drivers/xen/usbfront/xhci.h
- 412f47e4RKD-R5IS5gEXvcT8L4v8gA linux-2.6.10-xen-sparse/include/asm-generic/pgtable.h
- 40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/desc.h
- 4107adf1E5O4ztGHNGMzCCNhcvqNow linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
- 40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/fixmap.h
- 41979925z1MsKU1SfuuheM1IFDQ_bA linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/floppy.h
- 4118b6a418gnL6AZsTdglC92YGqYTg linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/highmem.h
- 40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/io.h
- 40f5623aKXkBBxgpLx2NcvkncQ1Yyw linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
- 40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
- 40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
+41811f07Iri9hrvs97t-baxmhOwWDQ linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/mach-xen/smpboot_hooks.h
- 4120f807GCO0uqsLqdZj9csxR1Wthw linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
- 40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/page.h
- 40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/param.h
- 41137cc1kkvg0cg7uxddcEfjL7L67w linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pci.h
- 40f5623atCokYc2uCysSJ8jFO8TEsw linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
- 412e01beTwiaC8sYY4XJP8PxLST5CA linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
- 40f5623aEToIXouJgO-ao5d5pcEt1w linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
- 40f5623aCCXRPlGpNthVXstGz9ZV3A linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/pgtable.h
- 40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/processor.h
- 412ea0afQL2CAI-f522TbLjLPMibPQ linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/ptrace.h
- 40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/segment.h
- 40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/setup.h
+4198c32a8NzmcKVOzKaEJfaQxxiA0A linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/spinlock.h
- 40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
- 40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/system.h
- 40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
- 41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6.10-xen-sparse/include/asm-xen/asm-i386/vga.h
- 41af4017PDMuSmMWtSRU5UC9Vylw5g linux-2.6.10-xen-sparse/include/asm-xen/balloon.h
- 40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.10-xen-sparse/include/asm-xen/ctrl_if.h
- 40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.10-xen-sparse/include/asm-xen/evtchn.h
- 419b4e9367PjTEvdjwavWN12BeBBXg linux-2.6.10-xen-sparse/include/asm-xen/foreign_page.h
- 412dfaeazclyNDM0cpnp60Yo4xulpQ linux-2.6.10-xen-sparse/include/asm-xen/gnttab.h
- 40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.10-xen-sparse/include/asm-xen/hypervisor.h
- 3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.10-xen-sparse/include/asm-xen/linux-public/privcmd.h
- 3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.10-xen-sparse/include/asm-xen/linux-public/suspend.h
- 40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.10-xen-sparse/include/asm-xen/multicall.h
- 4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.10-xen-sparse/include/asm-xen/queues.h
- 3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.10-xen-sparse/include/asm-xen/xen_proc.h
- 419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.10-xen-sparse/include/linux/gfp.h
- 42305f545Vc5SLCUewZ2-n-P9JJhEQ linux-2.6.10-xen-sparse/include/linux/highmem.h
- 419dfc609zbti8rqL60tL2dHXQ_rvQ linux-2.6.10-xen-sparse/include/linux/irq.h
- 4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.10-xen-sparse/include/linux/skbuff.h
- 419dfc6awx7w88wk6cG9P3mPidX6LQ linux-2.6.10-xen-sparse/kernel/irq/manage.c
- 40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.10-xen-sparse/mkbuildtree
- 42305f54Q6xJ1bXcQJlCQq1m-e2C8g linux-2.6.10-xen-sparse/mm/highmem.c
- 412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.10-xen-sparse/mm/memory.c
- 410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.10-xen-sparse/mm/page_alloc.c
- 41505c572m-s9ATiO1LiD1GPznTTIg linux-2.6.10-xen-sparse/net/core/skbuff.c
+ 40f562372u3A7_kfbYYixPHJJxYUxA linux-2.6.11-xen-sparse/arch/xen/Kconfig
+ 40f56237utH41NPukqHksuNf29IC9A linux-2.6.11-xen-sparse/arch/xen/Kconfig.drivers
+ 40f56237penAAlWVBVDpeQZNFIg8CA linux-2.6.11-xen-sparse/arch/xen/Makefile
+ 40f56237JTc60m1FRlUxkUaGSQKrNw linux-2.6.11-xen-sparse/arch/xen/boot/Makefile
+ 40f56237hRxbacU_3PdoAl6DjZ3Jnw linux-2.6.11-xen-sparse/arch/xen/configs/xen0_defconfig
+ 40f56237wubfjJKlfIzZlI3ZM2VgGA linux-2.6.11-xen-sparse/arch/xen/configs/xenU_defconfig
+ 40f56237Mta0yHNaMS_qtM2rge0qYA linux-2.6.11-xen-sparse/arch/xen/i386/Kconfig
+ 40f56238u2CJdXNpjsZgHBxeVyY-2g linux-2.6.11-xen-sparse/arch/xen/i386/Makefile
+ 40f56238eczveJ86k_4hNxCLRQIF-g linux-2.6.11-xen-sparse/arch/xen/i386/kernel/Makefile
+ 40f56238rXVTJQKbBuXXLH52qEArcg linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/Makefile
+ 40f562385s4lr6Zg92gExe7UQ4A76Q linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/common.c
+ 41ab440bnpxZdWShZrGgM9pPaz5rmA linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/Makefile
+ 41ab440bBKWz-aEOEojU4PAMXe3Ppg linux-2.6.11-xen-sparse/arch/xen/i386/kernel/cpu/mtrr/main.c
+ 40f56238XDtHSijkAFlbv1PT8Bhw_Q linux-2.6.11-xen-sparse/arch/xen/i386/kernel/entry.S
+ 40f56238bnvciAuyzAiMkdzGErYt1A linux-2.6.11-xen-sparse/arch/xen/i386/kernel/head.S
+ 40f58a0d31M2EkuPbG94ns_nOi0PVA linux-2.6.11-xen-sparse/arch/xen/i386/kernel/i386_ksyms.c
+ 40faa751_zbZlAmLyQgCXdYekVFdWA linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ioport.c
+ 40f56238ue3YRsK52HG7iccNzP1AwQ linux-2.6.11-xen-sparse/arch/xen/i386/kernel/ldt.c
+ 41d54a76YMCA67S8J-TBT3J62Wx6yA linux-2.6.11-xen-sparse/arch/xen/i386/kernel/microcode.c
+ 4107adf1cNtsuOxOB4T6paAoY2R2PA linux-2.6.11-xen-sparse/arch/xen/i386/kernel/pci-dma.c
+ 40f56238a8iOVDEoostsbun_sy2i4g linux-2.6.11-xen-sparse/arch/xen/i386/kernel/process.c
+ 40f56238YQIJoYG2ehDGEcdTgLmGbg linux-2.6.11-xen-sparse/arch/xen/i386/kernel/setup.c
+ 40f56238nWMQg7CKbyTy0KJNvCzbtg linux-2.6.11-xen-sparse/arch/xen/i386/kernel/signal.c
+ 40f56238qVGkpO_ycnQA8k03kQzAgA linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c
+ 40f56238NzTgeO63RGoxHrW5NQeO3Q linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/Makefile
+ 40f56238BMqG5PuSHufpjbvp_helBw linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c
+ 40f562389xNa78YBZciUibQjyRU_Lg linux-2.6.11-xen-sparse/arch/xen/i386/kernel/traps.c
+ 40f56238JypKAUG01ZojFwH7qnZ5uA linux-2.6.11-xen-sparse/arch/xen/i386/kernel/vsyscall.S
+ 40f56238wi6AdNQjm0RT57bSkwb6hg linux-2.6.11-xen-sparse/arch/xen/i386/kernel/vsyscall.lds
+ 40f56238a3w6-byOzexIlMgni76Lcg linux-2.6.11-xen-sparse/arch/xen/i386/mm/Makefile
+ 40f56238ILx8xlbywNbzTdv5Zr4xXQ linux-2.6.11-xen-sparse/arch/xen/i386/mm/fault.c
+ 4118cc35CbY8rfGVspF5O-7EkXBEAA linux-2.6.11-xen-sparse/arch/xen/i386/mm/highmem.c
+ 40f562383SKvDStdtrvzr5fyCbW4rw linux-2.6.11-xen-sparse/arch/xen/i386/mm/hypervisor.c
+ 40f56239xcNylAxuGsQHwi1AyMLV8w linux-2.6.11-xen-sparse/arch/xen/i386/mm/init.c
+ 41062ab7CjxC1UBaFhOMWWdhHkIUyg linux-2.6.11-xen-sparse/arch/xen/i386/mm/ioremap.c
+ 413b5ab8LIowAnQrEmaOJSdmqm96jQ linux-2.6.11-xen-sparse/arch/xen/i386/mm/pageattr.c
+ 40f5623906UYHv1rsVUeRc0tFT0dWw linux-2.6.11-xen-sparse/arch/xen/i386/mm/pgtable.c
+ 4107adf12ndy94MidCaivDibJ3pPAg linux-2.6.11-xen-sparse/arch/xen/i386/pci/Makefile
+ 4107adf1WcCgkhsdLTRGX52cOG1vJg linux-2.6.11-xen-sparse/arch/xen/i386/pci/direct.c
+ 4107adf1s5u6249DNPUViX1YNagbUQ linux-2.6.11-xen-sparse/arch/xen/i386/pci/irq.c
+ 40f56239zOksGg_H4XD4ye6iZNtoZA linux-2.6.11-xen-sparse/arch/xen/kernel/Makefile
+ 40f56239bvOjuuuViZ0XMlNiREFC0A linux-2.6.11-xen-sparse/arch/xen/kernel/ctrl_if.c
+ 41ab6fa06JdF7jxUsuDcjN3UhuIAxg linux-2.6.11-xen-sparse/arch/xen/kernel/devmem.c
+ 40f56238xFQe9T7M_U_FItM-bZIpLw linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c
+ 4110f478aeQWllIN7J4kouAHiAqrPw linux-2.6.11-xen-sparse/arch/xen/kernel/fixup.c
+ 412dfae9eA3_6e6bCGUtg1mj8b56fQ linux-2.6.11-xen-sparse/arch/xen/kernel/gnttab.c
+ 40f562392LBhwmOxVPsYdkYXMxI_ZQ linux-2.6.11-xen-sparse/arch/xen/kernel/reboot.c
+ 414c113396tK1HTVeUalm3u-1DF16g linux-2.6.11-xen-sparse/arch/xen/kernel/skbuff.c
+ 3f68905c5eiA-lBMQSvXLMWS1ikDEA linux-2.6.11-xen-sparse/arch/xen/kernel/xen_proc.c
+ 41261688yS8eAyy-7kzG4KBs0xbYCA linux-2.6.11-xen-sparse/drivers/Makefile
+ 4108f5c1WfTIrs0HZFeV39sttekCTw linux-2.6.11-xen-sparse/drivers/char/mem.c
+ 4111308bZAIzwf_Kzu6x1TZYZ3E0_Q linux-2.6.11-xen-sparse/drivers/char/tty_io.c
+ 40f56239Dp_vMTgz8TEbvo1hjHGc3w linux-2.6.11-xen-sparse/drivers/xen/Makefile
+ 41768fbcncpBQf8s2l2-CwoSNIZ9uA linux-2.6.11-xen-sparse/drivers/xen/balloon/Makefile
+ 3e6377f8i5e9eGz7Pw6fQuhuTQ7DQg linux-2.6.11-xen-sparse/drivers/xen/balloon/balloon.c
+ 410d0893otFGghmv4dUXDUBBdY5aIA linux-2.6.11-xen-sparse/drivers/xen/blkback/Makefile
+ 4087cf0d1XgMkooTZAiJS6NrcpLQNQ linux-2.6.11-xen-sparse/drivers/xen/blkback/blkback.c
+ 4087cf0dZadZ8r6CEt4fNN350Yle3A linux-2.6.11-xen-sparse/drivers/xen/blkback/common.h
+ 4087cf0dxlh29iw0w-9rxOCEGCjPcw linux-2.6.11-xen-sparse/drivers/xen/blkback/control.c
+ 4087cf0dbuoH20fMjNZjcgrRK-1msQ linux-2.6.11-xen-sparse/drivers/xen/blkback/interface.c
+ 4087cf0dk97tacDzxfByWV7JifUYqA linux-2.6.11-xen-sparse/drivers/xen/blkback/vbd.c
+ 40f56239Sfle6wGv5FS0wjS_HI150A linux-2.6.11-xen-sparse/drivers/xen/blkfront/Kconfig
+ 40f562395atl9x4suKGhPkjqLOXESg linux-2.6.11-xen-sparse/drivers/xen/blkfront/Makefile
+ 40f56239-JNIaTzlviVJohVdoYOUpw linux-2.6.11-xen-sparse/drivers/xen/blkfront/blkfront.c
+ 40f56239y9naBTXe40Pi2J_z3p-d1g linux-2.6.11-xen-sparse/drivers/xen/blkfront/block.h
+ 40f56239BVfPsXBiWQitXgDRtOsiqg linux-2.6.11-xen-sparse/drivers/xen/blkfront/vbd.c
+ 40f56239fsLjvtD8YBRAWphps4FDjg linux-2.6.11-xen-sparse/drivers/xen/console/Makefile
+ 3e5a4e651TH-SXHoufurnWjgl5bfOA linux-2.6.11-xen-sparse/drivers/xen/console/console.c
+ 40f56239KYxO0YabhPzCTeUuln-lnA linux-2.6.11-xen-sparse/drivers/xen/evtchn/Makefile
+ 40f56239DoibTX6R-ZYd3QTXAB8_TA linux-2.6.11-xen-sparse/drivers/xen/evtchn/evtchn.c
+ 410a9817HEVJvred5Oy_uKH3HFJC5Q linux-2.6.11-xen-sparse/drivers/xen/netback/Makefile
+ 4097ba831lpGeLlPg-bfV8XarVVuoQ linux-2.6.11-xen-sparse/drivers/xen/netback/common.h
+ 4097ba83wvv8yi5P5xugCUBAdb6O-A linux-2.6.11-xen-sparse/drivers/xen/netback/control.c
+ 4097ba83byY5bTSugJGZ1exTxIcMKw linux-2.6.11-xen-sparse/drivers/xen/netback/interface.c
+ 4087cf0dGmSbFhFZyIZBJzvqxY-qBw linux-2.6.11-xen-sparse/drivers/xen/netback/netback.c
+ 40f56239lrg_Ob0BJ8WBFS1zeg2CYw linux-2.6.11-xen-sparse/drivers/xen/netfront/Kconfig
+ 40f56239Wd4k_ycG_mFsSO1r5xKdtQ linux-2.6.11-xen-sparse/drivers/xen/netfront/Makefile
+ 405853f6nbeazrNyEWNHBuoSg2PiPA linux-2.6.11-xen-sparse/drivers/xen/netfront/netfront.c
+ 4108f5c1ppFXVpQzCOAZ6xXYubsjKA linux-2.6.11-xen-sparse/drivers/xen/privcmd/Makefile
+ 3e5a4e65IUfzzMu2kZFlGEB8-rpTaA linux-2.6.11-xen-sparse/drivers/xen/privcmd/privcmd.c
+ 412f47e4RKD-R5IS5gEXvcT8L4v8gA linux-2.6.11-xen-sparse/include/asm-generic/pgtable.h
+ 40f56239YAjS52QG2FIAQpHDZAdGHg linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/desc.h
+ 4107adf1E5O4ztGHNGMzCCNhcvqNow linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
+ 40f5623akIoBsQ3KxSB2kufkbgONXQ linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/fixmap.h
+ 41979925z1MsKU1SfuuheM1IFDQ_bA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/floppy.h
+ 4118b6a418gnL6AZsTdglC92YGqYTg linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/highmem.h
+ 40f5623aJVXQwpJMOLE99XgvGsfQ8Q linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/io.h
+ 40f5623aKXkBBxgpLx2NcvkncQ1Yyw linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
+ 40f5623aDMCsWOFO0jktZ4e8sjwvEg linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_post.h
+ 40f5623arsFXkGdPvIqvFi3yFXGR0Q linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mach-xen/setup_arch_pre.h
+ 4120f807GCO0uqsLqdZj9csxR1Wthw linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
-40f5623aFTyFTR-vdiA-KaGxk5JOKQ linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/msr.h
+ 40f5623adgjZq9nAgCt0IXdWl7udSA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/page.h
+ 40f5623a54NuG-7qHihGYmw4wWQnMA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/param.h
+ 41137cc1kkvg0cg7uxddcEfjL7L67w linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pci.h
+ 40f5623atCokYc2uCysSJ8jFO8TEsw linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
+ 412e01beTwiaC8sYY4XJP8PxLST5CA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level-defs.h
+ 40f5623aEToIXouJgO-ao5d5pcEt1w linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
+ 40f5623aCCXRPlGpNthVXstGz9ZV3A linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/pgtable.h
+ 40f5623aPCkQQfPtJSooGdhcatrvnQ linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/processor.h
+ 412ea0afQL2CAI-f522TbLjLPMibPQ linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/ptrace.h
+ 40f5623bzLvxr7WoJIxVf2OH4rCBJg linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/segment.h
+ 40f5623bG_LzgG6-qwk292nTc5Wabw linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/setup.h
+ 40f5623bgzm_9vwxpzJswlAxg298Gg linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/synch_bitops.h
+ 40f5623bVdKP7Dt7qm8twu3NcnGNbA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/system.h
+ 40f5623bc8LKPRO09wY5dGDnY_YCpw linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
+ 41062ab7uFxnCq-KtPeAm-aV8CicgA linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/vga.h
-40f5623bxUbeGjkRrjDguCy_Gm8RLw linux-2.6.11-xen-sparse/include/asm-xen/asm-i386/xor.h
+ 41af4017PDMuSmMWtSRU5UC9Vylw5g linux-2.6.11-xen-sparse/include/asm-xen/balloon.h
+ 40f5623bYNP7tHE2zX6YQxp9Zq2utQ linux-2.6.11-xen-sparse/include/asm-xen/ctrl_if.h
+ 40f5623b3Eqs8pAc5WpPX8_jTzV2qw linux-2.6.11-xen-sparse/include/asm-xen/evtchn.h
+ 419b4e9367PjTEvdjwavWN12BeBBXg linux-2.6.11-xen-sparse/include/asm-xen/foreign_page.h
+ 412dfaeazclyNDM0cpnp60Yo4xulpQ linux-2.6.11-xen-sparse/include/asm-xen/gnttab.h
+ 40f5623aGPlsm0u1LTO-NVZ6AGzNRQ linux-2.6.11-xen-sparse/include/asm-xen/hypervisor.h
+ 3f108af1ylCIm82H052FVTfXACBHrw linux-2.6.11-xen-sparse/include/asm-xen/linux-public/privcmd.h
+ 3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.11-xen-sparse/include/asm-xen/linux-public/suspend.h
+ 40f5623cndVUFlkxpf7Lfx7xu8madQ linux-2.6.11-xen-sparse/include/asm-xen/multicall.h
+ 4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.11-xen-sparse/include/asm-xen/queues.h
+ 3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.11-xen-sparse/include/asm-xen/xen_proc.h
+ 419b4e93z2S0gR17XTy8wg09JEwAhg linux-2.6.11-xen-sparse/include/linux/gfp.h
+ 42305f545Vc5SLCUewZ2-n-P9JJhEQ linux-2.6.11-xen-sparse/include/linux/highmem.h
+ 419dfc609zbti8rqL60tL2dHXQ_rvQ linux-2.6.11-xen-sparse/include/linux/irq.h
+ 419dfc6awx7w88wk6cG9P3mPidX6LQ linux-2.6.11-xen-sparse/kernel/irq/manage.c
+ 40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.11-xen-sparse/mkbuildtree
+ 42305f54Q6xJ1bXcQJlCQq1m-e2C8g linux-2.6.11-xen-sparse/mm/highmem.c
+ 412f46c0LJuKAgSPGoC0Z1DEkLfuLA linux-2.6.11-xen-sparse/mm/memory.c
+ 410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.11-xen-sparse/mm/page_alloc.c
413cb1e4zst25MDYjg63Y-NGC5_pLg netbsd-2.0-xen-sparse/Makefile
413cb1e5c_Mkxf_X0zimEhTKI_l4DA netbsd-2.0-xen-sparse/mkbuildtree
413cb1e5kY_Zil7-b0kI6hvCIxBEYg netbsd-2.0-xen-sparse/nbconfig-xen
422e4430vKaHLOOGS7X-SUUe3EBCgw netbsd-2.0-xen-sparse/sys/miscfs/kernfs/kernfs.h
422e4430-gOD358H8nGGnNWes08Nng netbsd-2.0-xen-sparse/sys/miscfs/kernfs/kernfs_vnops.c
413cb3b53nyOv1OIeDSsCXhBFDXvJA netbsd-2.0-xen-sparse/sys/nfs/files.nfs
- 413aa1d0oNP8HXLvfPuMe6cSroUfSA patches/linux-2.6.10/agpgart.patch
+ 413aa1d0oNP8HXLvfPuMe6cSroUfSA patches/linux-2.6.11/agpgart.patch
-418abc69J3F638vPO9MYoDGeYilxoQ patches/linux-2.6.11/nettel.patch
3f776bd1Hy9rn69ntXBhPReUFw9IEA tools/Makefile
40e1b09db5mN69Ijj0X_Eol-S7dXiw tools/Rules.mk
+4209033eUwhDBJ_bxejiv5c6gjXS4A tools/blktap/Makefile
+4209033ewLAHdhGrT_2jo3Gb_5bDcA tools/blktap/README
+42277b02mYXxgijE7MFeUe9d8eldMw tools/blktap/README-PARALLAX
+4209033eX_Xw94wHaOCtnU9nOAtSJA tools/blktap/blkaio.c
+4209033egwf6LDxM2hbaqi9rRdZy4A tools/blktap/blkaiolib.c
+4209033f9yELLK85Ipo2oKjr3ickgQ tools/blktap/blkaiolib.h
+4209033fL9LcSI6LXrIp5O4axbUBLg tools/blktap/blkcow.c
+4209033fUDlFGZreIyZHdP7h7yfvuQ tools/blktap/blkcowgnbd.c
+4209033fCgZzLeMOwNBFmsp99x58ZQ tools/blktap/blkcowimg.c
+4209033frfXH6oOi9AvRz08PPAndNA tools/blktap/blkcowlib.c
+4209033fhFd_y2go9HgCF395A35xJg tools/blktap/blkcowlib.h
+4209033fHgtGpb_K16_xC9CpkjNZLw tools/blktap/blkdump.c
+4209033fm61CZG1RyKDW75V-eTZ9fg tools/blktap/blkgnbd.c
+4209033fVfa-R6MFgGcmsQHTDna4PA tools/blktap/blkgnbdlib.c
+4209033fIgDQbaHwHStHhPEDTtbqsA tools/blktap/blkgnbdlib.h
+4209033figp5JRsKsXY8rw4keRumkg tools/blktap/blkimg.c
+42090340V-8HKGlr00SyJGsE5jXC3A tools/blktap/blkimglib.c
+42090340c7pQbh0Km8zLcEqPd_3zIg tools/blktap/blkimglib.h
+42090340_mvZtozMjghPJO0qsjk4NQ tools/blktap/blkint.h
+42090340rc2q1wmlGn6HtiJAkqhtNQ tools/blktap/blktaplib.c
+42090340C-WkRPT7N3t-8Lzehzogdw tools/blktap/blktaplib.h
+42277b02WrfP1meTDPv1M5swFq8oHQ tools/blktap/blockstore.c
+42277b02P1C0FYj3gqwTZUD8sxKCug tools/blktap/blockstore.h
+42090340B3mDvcxvd9ehDHUkg46hvw tools/blktap/libgnbd/Makefile
+42090340ZWkc5Xhf9lpQmDON8HJXww tools/blktap/libgnbd/gnbdtest.c
+42090340ocMiUScJE3OpY7QNunvSbg tools/blktap/libgnbd/libgnbd.c
+42090340G5_F_EeVnPORKB0pTMGGhA tools/blktap/libgnbd/libgnbd.h
+42277b03930x2TJT3PZlw6o0GERXpw tools/blktap/parallax.c
+42277b03XQYq8bujXSz7JAZ8N7j_pA tools/blktap/radix.c
+42277b03vZ4-jno_mgKmAcCW3ycRAg tools/blktap/radix.h
+42277b03U_wLHL-alMA0bfxGlqldXg tools/blktap/snaplog.c
+42277b04Ryya-z662BEx8HnxNN0dGQ tools/blktap/snaplog.h
+42277b04LxFjptgZ75Z98DUAso4Prg tools/blktap/vdi.c
+42277b04tt5QkIvs8She8CQqH5kwpg tools/blktap/vdi.h
+42277b04zMAhB0_946sHQ_H2vwnt0Q tools/blktap/vdi_create.c
+42277b04xB_iUmiSm6nKcy8OV8bckA tools/blktap/vdi_fill.c
+42277b045CJGD_rKH-ZT_-0X4knhWA tools/blktap/vdi_list.c
+42277b043ZKx0NJSbcgptQctQ5rerg tools/blktap/vdi_snap.c
+42277b043Fjy5-H7LyBtUPyDlZFo6A tools/blktap/vdi_snap_list.c
+42277b04vhqD6Lq3WmGbaESoAAKdhw tools/blktap/vdi_tree.c
+42277b047H8fTVyUf75BWAjh6Zpsqg tools/blktap/vdi_validate.c
4124b307nRyK3dhn1hAsvrY76NuV3g tools/check/Makefile
4124b307vHLUWbfpemVefmaWDcdfag tools/check/README
4124b307jt7T3CHysgl9LijNHSe1tA tools/check/check_brctl
--- /dev/null
+ #
+ # For a description of the syntax of this configuration file,
+ # see Documentation/kbuild/kconfig-language.txt.
+ #
+
+ mainmenu "Linux Kernel Configuration"
+
+ config XEN
+ bool
+ default y
+ help
+ This is the Linux Xen port.
+
+ config ARCH_XEN
+ bool
+ default y
+
+
+ config NO_IDLE_HZ
+ bool
+ default y
+
+
+ menu "XEN"
+
+ config XEN_PRIVILEGED_GUEST
+ bool "Privileged Guest (domain 0)"
+ default n
+ select XEN_PHYSDEV_ACCESS
+ help
+ Support for privileged operation (domain 0)
+
+ config XEN_PHYSDEV_ACCESS
+ bool "Physical device access"
+ default XEN_PRIVILEGED_GUEST
+ help
+ Assume access is available to physical hardware devices
+ (e.g., hard drives, network cards). This allows you to configure
+ such devices and also includes some low-level support that is
+ otherwise not compiled into the kernel.
+
+ config XEN_BLKDEV_BACKEND
+ bool "Block-device backend driver"
+ depends on XEN_PHYSDEV_ACCESS
+ default y
+ help
+ The block-device backend driver allows the kernel to export its
+ block devices to other guests via a high-performance shared-memory
+ interface.
+
++config XEN_BLKDEV_TAP_BE
++ bool "Block Tap support for backend driver (DANGEROUS)"
++ depends on XEN_BLKDEV_BACKEND
++ default n
++ help
++ If you intend to use the block tap driver, the backend domain will
++ not know the domain id of the real frontend, and so will not be able
++ to map its data pages. This modifies the backend to attempt to map
++ from both the tap domain and the real frontend. This presents a
++ security risk, and so should ONLY be used for development
++ with the blktap. This option will be removed as the block drivers are
++ modified to use grant tables.
++
+ config XEN_NETDEV_BACKEND
+ bool "Network-device backend driver"
+ depends on XEN_PHYSDEV_ACCESS
+ default y
+ help
+ The network-device backend driver allows the kernel to export its
+ network devices to other guests via a high-performance shared-memory
+ interface.
+
+ config XEN_BLKDEV_FRONTEND
+ bool "Block-device frontend driver"
+ default y
+ help
+ The block-device frontend driver allows the kernel to access block
+ devices mounted within another guest OS. Unless you are building a
+ dedicated device-driver domain or your master control domain
+ (domain 0), you almost certainly want to say Y here.
+
+ config XEN_NETDEV_FRONTEND
+ bool "Network-device frontend driver"
+ default y
+ help
+ The network-device frontend driver allows the kernel to access
+ network interfaces within another guest OS. Unless you are building a
+ dedicated device-driver domain or your master control domain
+ (domain 0), you almost certainly want to say Y here.
+
+ config XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+ bool "Pipelined transmitter (DANGEROUS)"
+ depends on XEN_NETDEV_FRONTEND
+ default n
+ help
+ The driver will assume that the backend is pipelining packets for
+ transmission: whenever packets are pending in the remote backend,
+ the driver will not send asynchronous notifications when it queues
+ additional packets for transmission.
+ If the backend is a dumb domain, such as a transparent Ethernet
+ bridge with no local IP interface, it is safe to say Y here to get
+ slightly lower network overhead.
+ If the backend has a local IP interface; or may be doing smart things
+ like reassembling packets to perform firewall filtering; or if you
+ are unsure; or if you experience network hangs when this option is
+ enabled; then you must say N here.
+
++config XEN_BLKDEV_TAP
++ bool "Block device tap driver"
++ default n
++ help
++ This driver allows a VM to interact on block device channels
++ to other VMs. Block messages may be passed through or redirected
++ to a character device, allowing device prototyping in application
++ space. Odds are that you want to say N here.
++
+ config XEN_WRITABLE_PAGETABLES
+ bool
+ default y
+
+ config XEN_SCRUB_PAGES
+ bool "Scrub memory before freeing it to Xen"
+ default y
+ help
+ Erase memory contents before freeing it back to Xen's global
+ pool. This ensures that any secrets contained within that
+ memory (e.g., private keys) cannot be found by other guests that
+ may be running on the machine. Most people will want to say Y here.
+ If security is not a concern then you may increase performance by
+ saying N.
+
+ choice
+ prompt "Processor Type"
+ default X86
+
+ config X86
+ bool "X86"
+ help
+ Choose this option if your computer is an X86 architecture.
+
+ config X86_64
+ bool "X86_64"
+ help
+ Choose this option if your computer is an X86_64 (64-bit x86) architecture.
+
+ endchoice
+
+ endmenu
+
+ config HAVE_ARCH_DEV_ALLOC_SKB
+ bool
+ default y
+
+ source "init/Kconfig"
+
+ if X86
+ source "arch/xen/i386/Kconfig"
+ endif
+
+ menu "Executable file formats"
+
+ source "fs/Kconfig.binfmt"
+
+ endmenu
+
+ source "arch/xen/Kconfig.drivers"
+
+ source "fs/Kconfig"
+
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+
+ source "lib/Kconfig"
--- /dev/null
+ #
+ # Automatically generated make config: don't edit
+ # Linux kernel version: 2.6.11-xenU
+ # Fri Mar 11 01:20:28 2005
+ #
+ CONFIG_XEN=y
+ CONFIG_ARCH_XEN=y
+ CONFIG_NO_IDLE_HZ=y
+
+ #
+ # XEN
+ #
+ # CONFIG_XEN_PRIVILEGED_GUEST is not set
+ # CONFIG_XEN_PHYSDEV_ACCESS is not set
+ CONFIG_XEN_BLKDEV_FRONTEND=y
+ CONFIG_XEN_NETDEV_FRONTEND=y
+ # CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER is not set
++# CONFIG_XEN_BLKDEV_TAP is not set
+ CONFIG_XEN_WRITABLE_PAGETABLES=y
+ CONFIG_XEN_SCRUB_PAGES=y
+ CONFIG_X86=y
+ # CONFIG_X86_64 is not set
+ CONFIG_HAVE_ARCH_DEV_ALLOC_SKB=y
+
+ #
+ # Code maturity level options
+ #
+ CONFIG_EXPERIMENTAL=y
+ CONFIG_CLEAN_COMPILE=y
+ CONFIG_BROKEN_ON_SMP=y
+ CONFIG_LOCK_KERNEL=y
+
+ #
+ # General setup
+ #
+ CONFIG_LOCALVERSION=""
+ CONFIG_SWAP=y
+ CONFIG_SYSVIPC=y
+ # CONFIG_POSIX_MQUEUE is not set
+ # CONFIG_BSD_PROCESS_ACCT is not set
+ CONFIG_SYSCTL=y
+ # CONFIG_AUDIT is not set
+ CONFIG_LOG_BUF_SHIFT=14
+ CONFIG_HOTPLUG=y
+ CONFIG_KOBJECT_UEVENT=y
+ # CONFIG_IKCONFIG is not set
+ # CONFIG_EMBEDDED is not set
+ CONFIG_KALLSYMS=y
+ # CONFIG_KALLSYMS_EXTRA_PASS is not set
+ CONFIG_FUTEX=y
+ CONFIG_EPOLL=y
+ # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+ CONFIG_SHMEM=y
+ CONFIG_CC_ALIGN_FUNCTIONS=0
+ CONFIG_CC_ALIGN_LABELS=0
+ CONFIG_CC_ALIGN_LOOPS=0
+ CONFIG_CC_ALIGN_JUMPS=0
+ # CONFIG_TINY_SHMEM is not set
+
+ #
+ # Loadable module support
+ #
+ CONFIG_MODULES=y
+ CONFIG_MODULE_UNLOAD=y
+ # CONFIG_MODULE_FORCE_UNLOAD is not set
+ CONFIG_OBSOLETE_MODPARM=y
+ # CONFIG_MODVERSIONS is not set
+ # CONFIG_MODULE_SRCVERSION_ALL is not set
+ CONFIG_KMOD=y
+
+ #
+ # X86 Processor Configuration
+ #
+ CONFIG_XENARCH="i386"
+ CONFIG_MMU=y
+ CONFIG_UID16=y
+ CONFIG_GENERIC_ISA_DMA=y
+ CONFIG_GENERIC_IOMAP=y
+ # CONFIG_M386 is not set
+ # CONFIG_M486 is not set
+ # CONFIG_M586 is not set
+ # CONFIG_M586TSC is not set
+ # CONFIG_M586MMX is not set
+ # CONFIG_M686 is not set
+ # CONFIG_MPENTIUMII is not set
+ # CONFIG_MPENTIUMIII is not set
+ # CONFIG_MPENTIUMM is not set
+ CONFIG_MPENTIUM4=y
+ # CONFIG_MK6 is not set
+ # CONFIG_MK7 is not set
+ # CONFIG_MK8 is not set
+ # CONFIG_MCRUSOE is not set
+ # CONFIG_MEFFICEON is not set
+ # CONFIG_MWINCHIPC6 is not set
+ # CONFIG_MWINCHIP2 is not set
+ # CONFIG_MWINCHIP3D is not set
+ # CONFIG_MCYRIXIII is not set
+ # CONFIG_MVIAC3_2 is not set
+ # CONFIG_X86_GENERIC is not set
+ CONFIG_X86_CMPXCHG=y
+ CONFIG_X86_XADD=y
+ CONFIG_X86_L1_CACHE_SHIFT=7
+ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+ CONFIG_GENERIC_CALIBRATE_DELAY=y
+ CONFIG_X86_WP_WORKS_OK=y
+ CONFIG_X86_INVLPG=y
+ CONFIG_X86_BSWAP=y
+ CONFIG_X86_POPAD_OK=y
+ CONFIG_X86_GOOD_APIC=y
+ CONFIG_X86_INTEL_USERCOPY=y
+ CONFIG_X86_USE_PPRO_CHECKSUM=y
+ # CONFIG_HPET_TIMER is not set
+ # CONFIG_HPET_EMULATE_RTC is not set
+ # CONFIG_SMP is not set
+ CONFIG_PREEMPT=y
+ CONFIG_PREEMPT_BKL=y
+ CONFIG_X86_CPUID=y
+
+ #
+ # Firmware Drivers
+ #
+ # CONFIG_EDD is not set
+ CONFIG_NOHIGHMEM=y
+ # CONFIG_HIGHMEM4G is not set
+ CONFIG_HAVE_DEC_LOCK=y
+ # CONFIG_REGPARM is not set
+
+ #
+ # Kernel hacking
+ #
+ # CONFIG_DEBUG_KERNEL is not set
+ CONFIG_EARLY_PRINTK=y
+ # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+ # CONFIG_FRAME_POINTER is not set
+ # CONFIG_4KSTACKS is not set
+ CONFIG_GENERIC_HARDIRQS=y
+ CONFIG_GENERIC_IRQ_PROBE=y
+ CONFIG_X86_BIOS_REBOOT=y
+ CONFIG_PC=y
+
+ #
+ # Executable file formats
+ #
+ CONFIG_BINFMT_ELF=y
+ # CONFIG_BINFMT_AOUT is not set
+ # CONFIG_BINFMT_MISC is not set
+
+ #
+ # Device Drivers
+ #
+
+ #
+ # Generic Driver Options
+ #
+ CONFIG_STANDALONE=y
+ CONFIG_PREVENT_FIRMWARE_BUILD=y
+ # CONFIG_FW_LOADER is not set
+
+ #
+ # Block devices
+ #
+ # CONFIG_BLK_DEV_FD is not set
+ # CONFIG_BLK_DEV_COW_COMMON is not set
+ CONFIG_BLK_DEV_LOOP=m
+ # CONFIG_BLK_DEV_CRYPTOLOOP is not set
+ CONFIG_BLK_DEV_NBD=m
+ CONFIG_BLK_DEV_RAM=y
+ CONFIG_BLK_DEV_RAM_COUNT=16
+ CONFIG_BLK_DEV_RAM_SIZE=4096
+ CONFIG_BLK_DEV_INITRD=y
+ CONFIG_INITRAMFS_SOURCE=""
+ # CONFIG_LBD is not set
+ # CONFIG_CDROM_PKTCDVD is not set
+
+ #
+ # IO Schedulers
+ #
+ CONFIG_IOSCHED_NOOP=y
+ CONFIG_IOSCHED_AS=y
+ CONFIG_IOSCHED_DEADLINE=y
+ CONFIG_IOSCHED_CFQ=y
+ # CONFIG_ATA_OVER_ETH is not set
+
+ #
+ # SCSI device support
+ #
+ CONFIG_SCSI=m
+ CONFIG_SCSI_PROC_FS=y
+
+ #
+ # SCSI support type (disk, tape, CD-ROM)
+ #
+ CONFIG_BLK_DEV_SD=m
+ # CONFIG_CHR_DEV_ST is not set
+ # CONFIG_CHR_DEV_OSST is not set
+ # CONFIG_BLK_DEV_SR is not set
+ # CONFIG_CHR_DEV_SG is not set
+
+ #
+ # Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+ #
+ # CONFIG_SCSI_MULTI_LUN is not set
+ # CONFIG_SCSI_CONSTANTS is not set
+ # CONFIG_SCSI_LOGGING is not set
+
+ #
+ # SCSI Transport Attributes
+ #
+ # CONFIG_SCSI_SPI_ATTRS is not set
+ # CONFIG_SCSI_FC_ATTRS is not set
+ # CONFIG_SCSI_ISCSI_ATTRS is not set
+
+ #
+ # SCSI low-level drivers
+ #
+ # CONFIG_SCSI_SATA is not set
+ # CONFIG_SCSI_DEBUG is not set
+
+ #
+ # Multi-device support (RAID and LVM)
+ #
+ # CONFIG_MD is not set
+
+ #
+ # Networking support
+ #
+ CONFIG_NET=y
+
+ #
+ # Networking options
+ #
+ CONFIG_PACKET=y
+ # CONFIG_PACKET_MMAP is not set
+ # CONFIG_NETLINK_DEV is not set
+ CONFIG_UNIX=y
+ # CONFIG_NET_KEY is not set
+ CONFIG_INET=y
+ # CONFIG_IP_MULTICAST is not set
+ # CONFIG_IP_ADVANCED_ROUTER is not set
+ CONFIG_IP_PNP=y
+ # CONFIG_IP_PNP_DHCP is not set
+ # CONFIG_IP_PNP_BOOTP is not set
+ # CONFIG_IP_PNP_RARP is not set
+ # CONFIG_NET_IPIP is not set
+ # CONFIG_NET_IPGRE is not set
+ # CONFIG_ARPD is not set
+ # CONFIG_SYN_COOKIES is not set
+ # CONFIG_INET_AH is not set
+ # CONFIG_INET_ESP is not set
+ # CONFIG_INET_IPCOMP is not set
+ # CONFIG_INET_TUNNEL is not set
+ CONFIG_IP_TCPDIAG=y
+ # CONFIG_IP_TCPDIAG_IPV6 is not set
+ # CONFIG_IPV6 is not set
+ # CONFIG_NETFILTER is not set
+
+ #
+ # SCTP Configuration (EXPERIMENTAL)
+ #
+ # CONFIG_IP_SCTP is not set
+ # CONFIG_ATM is not set
+ # CONFIG_BRIDGE is not set
+ # CONFIG_VLAN_8021Q is not set
+ # CONFIG_DECNET is not set
+ # CONFIG_LLC2 is not set
+ # CONFIG_IPX is not set
+ # CONFIG_ATALK is not set
+ # CONFIG_X25 is not set
+ # CONFIG_LAPB is not set
+ # CONFIG_NET_DIVERT is not set
+ # CONFIG_ECONET is not set
+ # CONFIG_WAN_ROUTER is not set
+
+ #
+ # QoS and/or fair queueing
+ #
+ # CONFIG_NET_SCHED is not set
+ # CONFIG_NET_CLS_ROUTE is not set
+
+ #
+ # Network testing
+ #
+ # CONFIG_NET_PKTGEN is not set
+ # CONFIG_NETPOLL is not set
+ # CONFIG_NET_POLL_CONTROLLER is not set
+ # CONFIG_HAMRADIO is not set
+ # CONFIG_IRDA is not set
+ # CONFIG_BT is not set
+ CONFIG_NETDEVICES=y
+ # CONFIG_DUMMY is not set
+ # CONFIG_BONDING is not set
+ # CONFIG_EQUALIZER is not set
+ # CONFIG_TUN is not set
+
+ #
+ # Ethernet (10 or 100Mbit)
+ #
+ # CONFIG_NET_ETHERNET is not set
+
+ #
+ # Ethernet (1000 Mbit)
+ #
+
+ #
+ # Ethernet (10000 Mbit)
+ #
+
+ #
+ # Token Ring devices
+ #
+
+ #
+ # Wireless LAN (non-hamradio)
+ #
+ # CONFIG_NET_RADIO is not set
+
+ #
+ # Wan interfaces
+ #
+ # CONFIG_WAN is not set
+ # CONFIG_PPP is not set
+ # CONFIG_SLIP is not set
+ # CONFIG_SHAPER is not set
+ # CONFIG_NETCONSOLE is not set
+ CONFIG_UNIX98_PTYS=y
+
+ #
+ # File systems
+ #
+ CONFIG_EXT2_FS=y
+ # CONFIG_EXT2_FS_XATTR is not set
+ CONFIG_EXT3_FS=y
+ CONFIG_EXT3_FS_XATTR=y
+ # CONFIG_EXT3_FS_POSIX_ACL is not set
+ # CONFIG_EXT3_FS_SECURITY is not set
+ CONFIG_JBD=y
+ # CONFIG_JBD_DEBUG is not set
+ CONFIG_FS_MBCACHE=y
+ CONFIG_REISERFS_FS=y
+ # CONFIG_REISERFS_CHECK is not set
+ # CONFIG_REISERFS_PROC_INFO is not set
+ # CONFIG_REISERFS_FS_XATTR is not set
+ # CONFIG_JFS_FS is not set
+
+ #
+ # XFS support
+ #
+ # CONFIG_XFS_FS is not set
+ # CONFIG_MINIX_FS is not set
+ # CONFIG_ROMFS_FS is not set
+ # CONFIG_QUOTA is not set
+ CONFIG_DNOTIFY=y
+ CONFIG_AUTOFS_FS=y
+ CONFIG_AUTOFS4_FS=y
+
+ #
+ # CD-ROM/DVD Filesystems
+ #
+ CONFIG_ISO9660_FS=y
+ CONFIG_JOLIET=y
+ CONFIG_ZISOFS=y
+ CONFIG_ZISOFS_FS=y
+ # CONFIG_UDF_FS is not set
+
+ #
+ # DOS/FAT/NT Filesystems
+ #
+ CONFIG_FAT_FS=m
+ CONFIG_MSDOS_FS=m
+ CONFIG_VFAT_FS=m
+ CONFIG_FAT_DEFAULT_CODEPAGE=437
+ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+ # CONFIG_NTFS_FS is not set
+
+ #
+ # Pseudo filesystems
+ #
+ CONFIG_PROC_FS=y
+ CONFIG_PROC_KCORE=y
+ CONFIG_SYSFS=y
+ # CONFIG_DEVFS_FS is not set
+ CONFIG_DEVPTS_FS_XATTR=y
+ # CONFIG_DEVPTS_FS_SECURITY is not set
+ CONFIG_TMPFS=y
+ # CONFIG_TMPFS_XATTR is not set
+ # CONFIG_HUGETLBFS is not set
+ # CONFIG_HUGETLB_PAGE is not set
+ CONFIG_RAMFS=y
+
+ #
+ # Miscellaneous filesystems
+ #
+ # CONFIG_ADFS_FS is not set
+ # CONFIG_AFFS_FS is not set
+ # CONFIG_HFS_FS is not set
+ # CONFIG_HFSPLUS_FS is not set
+ # CONFIG_BEFS_FS is not set
+ # CONFIG_BFS_FS is not set
+ # CONFIG_EFS_FS is not set
+ # CONFIG_CRAMFS is not set
+ # CONFIG_VXFS_FS is not set
+ # CONFIG_HPFS_FS is not set
+ # CONFIG_QNX4FS_FS is not set
+ # CONFIG_SYSV_FS is not set
+ # CONFIG_UFS_FS is not set
+
+ #
+ # Network File Systems
+ #
+ CONFIG_NFS_FS=y
+ CONFIG_NFS_V3=y
+ # CONFIG_NFS_V4 is not set
+ # CONFIG_NFS_DIRECTIO is not set
+ # CONFIG_NFSD is not set
+ CONFIG_ROOT_NFS=y
+ CONFIG_LOCKD=y
+ CONFIG_LOCKD_V4=y
+ CONFIG_SUNRPC=y
+ # CONFIG_RPCSEC_GSS_KRB5 is not set
+ # CONFIG_RPCSEC_GSS_SPKM3 is not set
+ # CONFIG_SMB_FS is not set
+ # CONFIG_CIFS is not set
+ # CONFIG_NCP_FS is not set
+ # CONFIG_CODA_FS is not set
+ # CONFIG_AFS_FS is not set
+
+ #
+ # Partition Types
+ #
+ # CONFIG_PARTITION_ADVANCED is not set
+ CONFIG_MSDOS_PARTITION=y
+
+ #
+ # Native Language Support
+ #
+ CONFIG_NLS=y
+ CONFIG_NLS_DEFAULT="iso8859-1"
+ CONFIG_NLS_CODEPAGE_437=y
+ # CONFIG_NLS_CODEPAGE_737 is not set
+ # CONFIG_NLS_CODEPAGE_775 is not set
+ # CONFIG_NLS_CODEPAGE_850 is not set
+ # CONFIG_NLS_CODEPAGE_852 is not set
+ # CONFIG_NLS_CODEPAGE_855 is not set
+ # CONFIG_NLS_CODEPAGE_857 is not set
+ # CONFIG_NLS_CODEPAGE_860 is not set
+ # CONFIG_NLS_CODEPAGE_861 is not set
+ # CONFIG_NLS_CODEPAGE_862 is not set
+ # CONFIG_NLS_CODEPAGE_863 is not set
+ # CONFIG_NLS_CODEPAGE_864 is not set
+ # CONFIG_NLS_CODEPAGE_865 is not set
+ # CONFIG_NLS_CODEPAGE_866 is not set
+ # CONFIG_NLS_CODEPAGE_869 is not set
+ # CONFIG_NLS_CODEPAGE_936 is not set
+ # CONFIG_NLS_CODEPAGE_950 is not set
+ # CONFIG_NLS_CODEPAGE_932 is not set
+ # CONFIG_NLS_CODEPAGE_949 is not set
+ # CONFIG_NLS_CODEPAGE_874 is not set
+ # CONFIG_NLS_ISO8859_8 is not set
+ # CONFIG_NLS_CODEPAGE_1250 is not set
+ # CONFIG_NLS_CODEPAGE_1251 is not set
+ # CONFIG_NLS_ASCII is not set
+ CONFIG_NLS_ISO8859_1=y
+ # CONFIG_NLS_ISO8859_2 is not set
+ # CONFIG_NLS_ISO8859_3 is not set
+ # CONFIG_NLS_ISO8859_4 is not set
+ # CONFIG_NLS_ISO8859_5 is not set
+ # CONFIG_NLS_ISO8859_6 is not set
+ # CONFIG_NLS_ISO8859_7 is not set
+ # CONFIG_NLS_ISO8859_9 is not set
+ # CONFIG_NLS_ISO8859_13 is not set
+ # CONFIG_NLS_ISO8859_14 is not set
+ # CONFIG_NLS_ISO8859_15 is not set
+ # CONFIG_NLS_KOI8_R is not set
+ # CONFIG_NLS_KOI8_U is not set
+ # CONFIG_NLS_UTF8 is not set
+
+ #
+ # Security options
+ #
+ # CONFIG_KEYS is not set
+ # CONFIG_SECURITY is not set
+
+ #
+ # Cryptographic options
+ #
+ CONFIG_CRYPTO=y
+ # CONFIG_CRYPTO_HMAC is not set
+ # CONFIG_CRYPTO_NULL is not set
+ # CONFIG_CRYPTO_MD4 is not set
+ CONFIG_CRYPTO_MD5=m
+ # CONFIG_CRYPTO_SHA1 is not set
+ # CONFIG_CRYPTO_SHA256 is not set
+ # CONFIG_CRYPTO_SHA512 is not set
+ # CONFIG_CRYPTO_WP512 is not set
+ # CONFIG_CRYPTO_DES is not set
+ # CONFIG_CRYPTO_BLOWFISH is not set
+ # CONFIG_CRYPTO_TWOFISH is not set
+ # CONFIG_CRYPTO_SERPENT is not set
+ # CONFIG_CRYPTO_AES_586 is not set
+ # CONFIG_CRYPTO_CAST5 is not set
+ # CONFIG_CRYPTO_CAST6 is not set
+ # CONFIG_CRYPTO_TEA is not set
+ # CONFIG_CRYPTO_ARC4 is not set
+ # CONFIG_CRYPTO_KHAZAD is not set
+ # CONFIG_CRYPTO_ANUBIS is not set
+ # CONFIG_CRYPTO_DEFLATE is not set
+ # CONFIG_CRYPTO_MICHAEL_MIC is not set
+ CONFIG_CRYPTO_CRC32C=m
+ # CONFIG_CRYPTO_TEST is not set
+
+ #
+ # Hardware crypto devices
+ #
+ # CONFIG_CRYPTO_DEV_PADLOCK is not set
+
+ #
+ # Library routines
+ #
+ # CONFIG_CRC_CCITT is not set
+ # CONFIG_CRC32 is not set
+ CONFIG_LIBCRC32C=m
+ CONFIG_ZLIB_INFLATE=y
--- /dev/null
- bool
- default n
-#config SMP
-# bool "Symmetric multi-processing support"
-# ---help---
-# This enables support for systems with more than one CPU. If you have
-# a system with only one CPU, like most personal computers, say N. If
-# you have a system with more than one CPU, say Y.
-#
-# If you say N here, the kernel will run on single and multiprocessor
-# machines, but will use only one CPU of a multiprocessor machine. If
-# you say Y here, the kernel will run on many, but not all,
-# singleprocessor machines. On a singleprocessor machine, the kernel
-# will run faster if you say N here.
-#
-# Note that if you say Y here and choose architecture "586" or
-# "Pentium" under "Processor family", the kernel will not work on 486
-# architectures. Similarly, multiprocessor kernels for the "PPro"
-# architecture may not work on all Pentium based boards.
-#
-# People using multiprocessor machines who say Y here should also say
-# Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
-# Management" code will be disabled if you say Y here.
-#
-# See also the <file:Documentation/smp.txt>,
-# <file:Documentation/i386/IO-APIC.txt>,
-# <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
-# <http://www.tldp.org/docs.html#howto>.
-#
-# If you don't know what to do here, say N.
+ #
+ # For a description of the syntax of this configuration file,
+ # see Documentation/kbuild/kconfig-language.txt.
+ #
+
+ menu "X86 Processor Configuration"
+
+ config XENARCH
+ string
+ default i386
+
+ config MMU
+ bool
+ default y
+
+ config SBUS
+ bool
+
+ config UID16
+ bool
+ default y
+
+ config GENERIC_ISA_DMA
+ bool
+ default y
+
+ config GENERIC_IOMAP
+ bool
+ default y
+
+ choice
+ prompt "Processor family"
+ default M686
+
+ config M386
+ bool "386"
+ ---help---
+ This is the processor type of your CPU. This information is used for
+ optimizing purposes. In order to compile a kernel that can run on
+ all x86 CPU types (albeit not optimally fast), you can specify
+ "386" here.
+
+ The kernel will not necessarily run on earlier architectures than
+ the one you have chosen, e.g. a Pentium optimized kernel will run on
+ a PPro, but not necessarily on a i486.
+
+ Here are the settings recommended for greatest speed:
+ - "386" for the AMD/Cyrix/Intel 386DX/DXL/SL/SLC/SX, Cyrix/TI
+ 486DLC/DLC2, UMC 486SX-S and NexGen Nx586. Only "386" kernels
+ will run on a 386 class machine.
+ - "486" for the AMD/Cyrix/IBM/Intel 486DX/DX2/DX4 or
+ SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or U5S.
+ - "586" for generic Pentium CPUs lacking the TSC
+ (time stamp counter) register.
+ - "Pentium-Classic" for the Intel Pentium.
+ - "Pentium-MMX" for the Intel Pentium MMX.
+ - "Pentium-Pro" for the Intel Pentium Pro.
+ - "Pentium-II" for the Intel Pentium II or pre-Coppermine Celeron.
+ - "Pentium-III" for the Intel Pentium III or Coppermine Celeron.
+ - "Pentium-4" for the Intel Pentium 4 or P4-based Celeron.
+ - "K6" for the AMD K6, K6-II and K6-III (aka K6-3D).
+ - "Athlon" for the AMD K7 family (Athlon/Duron/Thunderbird).
+ - "Crusoe" for the Transmeta Crusoe series.
+ - "Efficeon" for the Transmeta Efficeon series.
+ - "Winchip-C6" for original IDT Winchip.
+ - "Winchip-2" for IDT Winchip 2.
+ - "Winchip-2A" for IDT Winchips with 3dNow! capabilities.
+ - "CyrixIII/VIA C3" for VIA Cyrix III or VIA C3.
+ - "VIA C3-2 for VIA C3-2 "Nehemiah" (model 9 and above).
+
+ If you don't know what to do, choose "386".
+
+ config M486
+ bool "486"
+ help
+ Select this for a 486 series processor, either Intel or one of the
+ compatible processors from AMD, Cyrix, or IBM. Includes DX,
+ DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
+ U5S.
+
+ config M586
+ bool "586/K5/5x86/6x86/6x86MX"
+ help
+ Select this for an 586 or 686 series processor such as the AMD K5,
+ the Cyrix 5x86, 6x86 and 6x86MX. This choice does not
+ assume the RDTSC (Read Time Stamp Counter) instruction.
+
+ config M586TSC
+ bool "Pentium-Classic"
+ help
+ Select this for a Pentium Classic processor with the RDTSC (Read
+ Time Stamp Counter) instruction for benchmarking.
+
+ config M586MMX
+ bool "Pentium-MMX"
+ help
+ Select this for a Pentium with the MMX graphics/multimedia
+ extended instructions.
+
+ config M686
+ bool "Pentium-Pro"
+ help
+ Select this for Intel Pentium Pro chips. This enables the use of
+ Pentium Pro extended instructions, and disables the init-time guard
+ against the f00f bug found in earlier Pentiums.
+
+ config MPENTIUMII
+ bool "Pentium-II/Celeron(pre-Coppermine)"
+ help
+ Select this for Intel chips based on the Pentium-II and
+ pre-Coppermine Celeron core. This option enables an unaligned
+ copy optimization, compiles the kernel with optimization flags
+ tailored for the chip, and applies any applicable Pentium Pro
+ optimizations.
+
+ config MPENTIUMIII
+ bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
+ help
+ Select this for Intel chips based on the Pentium-III and
+ Celeron-Coppermine core. This option enables use of some
+ extended prefetch instructions in addition to the Pentium II
+ extensions.
+
+ config MPENTIUMM
+ bool "Pentium M"
+ help
+ Select this for Intel Pentium M (not Pentium-4 M)
+ notebook chips.
+
+ config MPENTIUM4
+ bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/Xeon"
+ help
+ Select this for Intel Pentium 4 chips. This includes the
+ Pentium 4, P4-based Celeron and Xeon, and Pentium-4 M
+ (not Pentium M) chips. This option enables compile flags
+ optimized for the chip, uses the correct cache shift, and
+ applies any applicable Pentium III optimizations.
+
+ config MK6
+ bool "K6/K6-II/K6-III"
+ help
+ Select this for an AMD K6-family processor. Enables use of
+ some extended instructions, and passes appropriate optimization
+ flags to GCC.
+
+ config MK7
+ bool "Athlon/Duron/K7"
+ help
+ Select this for an AMD Athlon K7-family processor. Enables use of
+ some extended instructions, and passes appropriate optimization
+ flags to GCC.
+
+ config MK8
+ bool "Opteron/Athlon64/Hammer/K8"
+ help
+ Select this for an AMD Opteron or Athlon64 Hammer-family processor. Enables
+ use of some extended instructions, and passes appropriate optimization
+ flags to GCC.
+
+ config MCRUSOE
+ bool "Crusoe"
+ help
+ Select this for a Transmeta Crusoe processor. Treats the processor
+ like a 586 with TSC, and sets some GCC optimization flags (like a
+ Pentium Pro with no alignment requirements).
+
+ config MEFFICEON
+ bool "Efficeon"
+ help
+ Select this for a Transmeta Efficeon processor.
+
+ config MWINCHIPC6
+ bool "Winchip-C6"
+ help
+ Select this for an IDT Winchip C6 chip. Linux and GCC
+ treat this chip as a 586TSC with some extended instructions
+ and alignment requirements.
+
+ config MWINCHIP2
+ bool "Winchip-2"
+ help
+ Select this for an IDT Winchip-2. Linux and GCC
+ treat this chip as a 586TSC with some extended instructions
+ and alignment requirements.
+
+ config MWINCHIP3D
+ bool "Winchip-2A/Winchip-3"
+ help
+ Select this for an IDT Winchip-2A or 3. Linux and GCC
+ treat this chip as a 586TSC with some extended instructions
+ and alignment requirements. Also enable out of order memory
+ stores for this CPU, which can increase performance of some
+ operations.
+
+ config MCYRIXIII
+ bool "CyrixIII/VIA-C3"
+ help
+ Select this for a Cyrix III or C3 chip. Presently Linux and GCC
+ treat this chip as a generic 586. Whilst the CPU is 686 class,
+ it lacks the cmov extension which gcc assumes is present when
+ generating 686 code.
+ Note that Nehemiah (Model 9) and above will not boot with this
+ kernel due to them lacking the 3DNow! instructions used in earlier
+ incarnations of the CPU.
+
+ config MVIAC3_2
+ bool "VIA C3-2 (Nehemiah)"
+ help
+ Select this for a VIA C3 "Nehemiah". Selecting this enables usage
+ of SSE and tells gcc to treat the CPU as a 686.
+ Note, this kernel will not boot on older (pre model 9) C3s.
+
+ endchoice
+
+ config X86_GENERIC
+ bool "Generic x86 support"
+ help
+ Instead of just including optimizations for the selected
+ x86 variant (e.g. PII, Crusoe or Athlon), include some more
+ generic optimizations as well. This will make the kernel
+ perform better on x86 CPUs other than that selected.
+
+ This is really intended for distributors who need more
+ generic optimizations.
+
+ #
+ # Define implied options from the CPU selection here
+ #
+ config X86_CMPXCHG
+ bool
+ depends on !M386
+ default y
+
+ config X86_XADD
+ bool
+ depends on !M386
+ default y
+
+ config X86_L1_CACHE_SHIFT
+ int
+ default "7" if MPENTIUM4 || X86_GENERIC
+ default "4" if X86_ELAN || M486 || M386
+ default "5" if MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2
+ default "6" if MK7 || MK8 || MPENTIUMM
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
+ depends on M386
+ default y
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+ depends on !M386
+ default y
+
+ config GENERIC_CALIBRATE_DELAY
+ bool
+ default y
+
+ config X86_PPRO_FENCE
+ bool
+ depends on M686 || M586MMX || M586TSC || M586 || M486 || M386
+ default y
+
+ config X86_F00F_BUG
+ bool
+ depends on M586MMX || M586TSC || M586 || M486 || M386
+ default y
+
+ config X86_WP_WORKS_OK
+ bool
+ depends on !M386
+ default y
+
+ config X86_INVLPG
+ bool
+ depends on !M386
+ default y
+
+ config X86_BSWAP
+ bool
+ depends on !M386
+ default y
+
+ config X86_POPAD_OK
+ bool
+ depends on !M386
+ default y
+
+ config X86_ALIGNMENT_16
+ bool
+ depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
+ default y
+
+ config X86_GOOD_APIC
+ bool
+ depends on MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || MK8 || MEFFICEON
+ default y
+
+ config X86_INTEL_USERCOPY
+ bool
+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON
+ default y
+
+ config X86_USE_PPRO_CHECKSUM
+ bool
+ depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON
+ default y
+
+ config X86_USE_3DNOW
+ bool
+ depends on MCYRIXIII || MK7
+ default y
+
+ config X86_OOSTORE
+ bool
+ depends on (MWINCHIP3D || MWINCHIP2 || MWINCHIPC6) && MTRR
+ default y
+
+ config HPET_TIMER
+ bool
+ default n
+ #config HPET_TIMER
+ # bool "HPET Timer Support"
+ # help
+ # This enables the use of the HPET for the kernel's internal timer.
+ # HPET is the next generation timer replacing legacy 8254s.
+ # You can safely choose Y here. However, HPET will only be
+ # activated if the platform and the BIOS support this feature.
+ # Otherwise the 8254 will be used for timing services.
+ #
+ # Choose N to continue using the legacy 8254 timer.
+
+ config HPET_EMULATE_RTC
+ def_bool HPET_TIMER && RTC=y
+
+ config SMP
-config X86_LOCAL_APIC
- bool
- depends on (X86_VISWS || SMP) && !X86_VOYAGER
- default y
-
-config X86_IO_APIC
- bool
- depends on SMP && !(X86_VISWS || X86_VOYAGER)
- default y
++ bool "Symmetric multi-processing support"
++ ---help---
++ This enables support for systems with more than one CPU. If you have
++ a system with only one CPU, like most personal computers, say N. If
++ you have a system with more than one CPU, say Y.
++
++ If you say N here, the kernel will run on single and multiprocessor
++ machines, but will use only one CPU of a multiprocessor machine. If
++ you say Y here, the kernel will run on many, but not all,
++ singleprocessor machines. On a singleprocessor machine, the kernel
++ will run faster if you say N here.
++
++ Note that if you say Y here and choose architecture "586" or
++ "Pentium" under "Processor family", the kernel will not work on 486
++ architectures. Similarly, multiprocessor kernels for the "PPro"
++ architecture may not work on all Pentium based boards.
++
++ People using multiprocessor machines who say Y here should also say
++ Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
++ Management" code will be disabled if you say Y here.
++
++ See also the <file:Documentation/smp.txt>,
++ <file:Documentation/i386/IO-APIC.txt>,
++ <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
++ <http://www.tldp.org/docs.html#howto>.
++
++ If you don't know what to do here, say N.
+
+ config NR_CPUS
+ int "Maximum number of CPUs (2-255)"
+ range 2 255
+ depends on SMP
+ default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+ kernel will support. The maximum supported value is 255 and the
+ minimum value which makes sense is 2.
+
+ This is purely to save memory - each supported CPU adds
+ approximately eight kilobytes to the kernel image.
+
+ config SCHED_SMT
+ bool "SMT (Hyperthreading) scheduler support"
+ depends on SMP
+ default n
+ help
+ SMT scheduler support improves the CPU scheduler's decision making
+ when dealing with Intel Pentium 4 chips with HyperThreading at a
+ cost of slightly increased overhead in some places. If unsure say
+ N here.
+
+ config PREEMPT
+ bool "Preemptible Kernel"
+ help
+ This option reduces the latency of the kernel when reacting to
+ real-time or interactive events by allowing a low priority process to
+ be preempted even if it is in kernel mode executing a system call.
+ This allows applications to run more reliably even when the system is
+ under load.
+
+ Say Y here if you are building a kernel for a desktop, embedded
+ or real-time system. Say N if you are unsure.
+
+ config PREEMPT_BKL
+ bool "Preempt The Big Kernel Lock"
+ depends on PREEMPT
+ default y
+ help
+ This option reduces the latency of the kernel by making the
+ big kernel lock preemptible.
+
+ Say Y here if you are building a kernel for a desktop system.
+ Say N if you are unsure.
+
+ #config X86_TSC
+ # bool
+ # depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2) && !X86_NUMAQ
+ # default y
+
+ #config X86_MCE
+ # bool "Machine Check Exception"
+ # depends on !X86_VOYAGER
+ # ---help---
+ # Machine Check Exception support allows the processor to notify the
+ # kernel if it detects a problem (e.g. overheating, component failure).
+ # The action the kernel takes depends on the severity of the problem,
+ # ranging from a warning message on the console, to halting the machine.
+ # Your processor must be a Pentium or newer to support this - check the
+ # flags in /proc/cpuinfo for mce. Note that some older Pentium systems
+ # have a design flaw which leads to false MCE events - hence MCE is
+ # disabled on all P5 processors, unless explicitly enabled with "mce"
+ # as a boot argument. Similarly, if MCE is built in and creates a
+ # problem on some new non-standard machine, you can boot with "nomce"
+ # to disable it. MCE support simply ignores non-MCE processors like
+ # the 386 and 486, so nearly everyone can say Y here.
+
+ #config X86_MCE_NONFATAL
+ # tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
+ # depends on X86_MCE
+ # help
+ # Enabling this feature starts a timer that triggers every 5 seconds which
+ # will look at the machine check registers to see if anything happened.
+ # Non-fatal problems automatically get corrected (but still logged).
+ # Disable this if you don't want to see these messages.
+ # Seeing the messages this option prints out may be indicative of dying hardware,
+ # or out-of-spec (ie, overclocked) hardware.
+ # This option only does something on certain CPUs.
+ # (AMD Athlon/Duron and Intel Pentium 4)
+
+ #config X86_MCE_P4THERMAL
+ # bool "check for P4 thermal throttling interrupt."
+ # depends on X86_MCE && (X86_UP_APIC || SMP)
+ # help
+ # Enabling this feature will cause a message to be printed when the P4
+ # enters thermal throttling.
+
+ config MICROCODE
+ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
+ depends on XEN_PRIVILEGED_GUEST
+ ---help---
+ If you say Y here and also to "/dev file system support" in the
+ 'File systems' section, you will be able to update the microcode on
+ Intel processors in the IA32 family, e.g. Pentium Pro, Pentium II,
+ Pentium III, Pentium 4, Xeon etc. You will obviously need the
+ actual microcode binary data itself which is not shipped with the
+ Linux kernel.
+
+ For latest news and information on obtaining all the required
+ ingredients for this driver, check:
+ <http://www.urbanmyth.org/microcode/>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called microcode.
+
+ #config X86_MSR
+ # tristate "/dev/cpu/*/msr - Model-specific register support"
+ # help
+ # This device gives privileged processes access to the x86
+ # Model-Specific Registers (MSRs). It is a character device with
+ # major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
+ # MSR accesses are directed to a specific CPU on multi-processor
+ # systems.
+
+ config X86_CPUID
+ tristate "/dev/cpu/*/cpuid - CPU information support"
+ help
+ This device gives processes access to the x86 CPUID instruction to
+ be executed on a specific processor. It is a character device
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
+
+ source "drivers/firmware/Kconfig"
+
+ choice
+ prompt "High Memory Support"
+ default NOHIGHMEM
+
+ config NOHIGHMEM
+ bool "off"
+ ---help---
+ Linux can use up to 64 Gigabytes of physical memory on x86 systems.
+ However, the address space of 32-bit x86 processors is only 4
+ Gigabytes large. That means that, if you have a large amount of
+ physical memory, not all of it can be "permanently mapped" by the
+ kernel. The physical memory that's not permanently mapped is called
+ "high memory".
+
+ If you are compiling a kernel which will never run on a machine with
+ more than 1 Gigabyte total physical RAM, answer "off" here (default
+ choice and suitable for most users). This will result in a "3GB/1GB"
+ split: 3GB are mapped so that each process sees a 3GB virtual memory
+ space and the remaining part of the 4GB virtual memory space is used
+ by the kernel to permanently map as much physical memory as
+ possible.
+
+ If the machine has between 1 and 4 Gigabytes physical RAM, then
+ answer "4GB" here.
+
+ If more than 4 Gigabytes is used then answer "64GB" here. This
+ selection turns Intel PAE (Physical Address Extension) mode on.
+ PAE implements 3-level paging on IA32 processors. PAE is fully
+ supported by Linux, PAE mode is implemented on all recent Intel
+ processors (Pentium Pro and better). NOTE: If you say "64GB" here,
+ then the kernel will not boot on CPUs that don't support PAE!
+
+ The actual amount of total physical memory will either be
+ auto detected or can be forced by using a kernel command line option
+ such as "mem=256M". (Try "man bootparam" or see the documentation of
+ your boot loader (lilo or loadlin) about how to pass options to the
+ kernel at boot time.)
+
+ If unsure, say "off".
+
+ config HIGHMEM4G
+ bool "4GB"
+ help
+ Select this if you have a 32-bit processor and between 1 and 4
+ gigabytes of physical RAM.
+
+ #config HIGHMEM64G
+ # bool "64GB"
+ # help
+ # Select this if you have a 32-bit processor and more than 4
+ # gigabytes of physical RAM.
+
+ endchoice
+
+ config HIGHMEM
+ bool
+ depends on HIGHMEM64G || HIGHMEM4G
+ default y
+
+ config X86_PAE
+ bool
+ depends on HIGHMEM64G
+ default y
+
+ # Common NUMA Features
+ config NUMA
+ bool "Numa Memory Allocation and Scheduler Support"
+ depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
+ default n if X86_PC
+ default y if (X86_NUMAQ || X86_SUMMIT)
+
+ # Need comments to help the hapless user trying to turn on NUMA support
+ comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
+ depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
+
+ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
+ depends on X86_SUMMIT && (!HIGHMEM64G || !ACPI)
+
+ config DISCONTIGMEM
+ bool
+ depends on NUMA
+ default y
+
+ config HAVE_ARCH_BOOTMEM_NODE
+ bool
+ depends on NUMA
+ default y
+
+ #config HIGHPTE
+ # bool "Allocate 3rd-level pagetables from highmem"
+ # depends on HIGHMEM4G || HIGHMEM64G
+ # help
+ # The VM uses one page table entry for each page of physical memory.
+ # For systems with a lot of RAM, this can be wasteful of precious
+ # low memory. Setting this option will put user-space page table
+ # entries in high memory.
+
+ config MTRR
+ bool
+ depends on XEN_PRIVILEGED_GUEST
+ default y
+
+ #config MTRR
+ # bool "MTRR (Memory Type Range Register) support"
+ # ---help---
+ # On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ # the Memory Type Range Registers (MTRRs) may be used to control
+ # processor access to memory ranges. This is most useful if you have
+ # a video (VGA) card on a PCI or AGP bus. Enabling write-combining
+ # allows bus write transfers to be combined into a larger transfer
+ # before bursting over the PCI/AGP bus. This can increase performance
+ # of image write operations 2.5 times or more. Saying Y here creates a
+ # /proc/mtrr file which may be used to manipulate your processor's
+ # MTRRs. Typically the X server should use this.
+ #
+ # This code has a reasonably generic interface so that similar
+ # control registers on other processors can be easily supported
+ # as well:
+ #
+ # The Cyrix 6x86, 6x86MX and M II processors have Address Range
+ # Registers (ARRs) which provide a similar functionality to MTRRs. For
+ # these, the ARRs are used to emulate the MTRRs.
+ # The AMD K6-2 (stepping 8 and above) and K6-3 processors have two
+ # MTRRs. The Centaur C6 (WinChip) has 8 MCRs, allowing
+ # write-combining. All of these processors are supported by this code
+ # and it makes sense to say Y here if you have one of them.
+ #
+ # Saying Y here also fixes a problem with buggy SMP BIOSes which only
+ # set the MTRRs for the boot CPU and not for the secondary CPUs. This
+ # can lead to all sorts of problems, so it's good to say Y here.
+ #
+ # You can safely say Y even if your machine doesn't have MTRRs, you'll
+ # just add about 9 KB to your kernel.
+ #
+ # See <file:Documentation/mtrr.txt> for more information.
+
+ config IRQBALANCE
+ bool "Enable kernel irq balancing"
+ depends on SMP && X86_IO_APIC
+ default y
+ help
+ The default yes will allow the kernel to do irq load balancing.
+ Saying no will keep the kernel from doing irq load balancing.
+
+ config HAVE_DEC_LOCK
+ bool
+ depends on (SMP || PREEMPT) && X86_CMPXCHG
+ default y
+
+ # turning this on wastes a bunch of space.
+ # Summit needs it only when NUMA is on
+ config BOOT_IOREMAP
+ bool
+ depends on (((X86_SUMMIT || X86_GENERICARCH) && NUMA) || (X86 && EFI))
+ default y
+
+ config REGPARM
+ bool "Use register arguments (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default n
+ help
+ Compile the kernel with -mregparm=3. This uses a different ABI
+ and passes the first three arguments of a function call in registers.
+ This will probably break binary only modules.
+
+ This feature is only enabled for gcc-3.0 and later - earlier compilers
+ generate incorrect output with certain kernel constructs when
+ -mregparm=3 is used.
+
+
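++# Xen guests take interrupts as event-channel upcalls rather than through the
++# local APIC, so this option now defaults to off.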
++config X86_LOCAL_APIC
++ bool
++ depends on (X86_VISWS || SMP) && !X86_VOYAGER
++ default n
++
+ if XEN_PHYSDEV_ACCESS
+
+ menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"
+
+ config X86_VISWS_APIC
+ bool
+ depends on X86_VISWS
+ default y
+
-config X86_HT
- bool
- depends on SMP && !(X86_VISWS || X86_VOYAGER)
- default y
++#config X86_IO_APIC
++# bool
++# depends on SMP && !(X86_VISWS || X86_VOYAGER)
++# default y
+
+ config PCI
+ bool "PCI support" if !X86_VISWS
+ depends on !X86_VOYAGER
+ default y if X86_VISWS
+ help
+ Find out whether you have a PCI motherboard. PCI is the name of a
+ bus system, i.e. the way the CPU talks to the other stuff inside
+ your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
+ VESA. If you have PCI, say Y, otherwise N.
+
+ The PCI-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, contains valuable
+ information about which PCI hardware does work under Linux and which
+ doesn't.
+
+ #choice
+ # prompt "PCI access mode"
+ # depends on PCI && !X86_VISWS
+ # default PCI_GOANY
+ # ---help---
+ # On PCI systems, the BIOS can be used to detect the PCI devices and
+ # determine their configuration. However, some old PCI motherboards
+ # have BIOS bugs and may crash if this is done. Also, some embedded
+ # PCI-based systems don't have any BIOS at all. Linux can also try to
+ # detect the PCI hardware directly without using the BIOS.
+ #
+ # With this option, you can specify how Linux should detect the
+ # PCI devices. If you choose "BIOS", the BIOS will be used,
+ # if you choose "Direct", the BIOS won't be used, and if you
+ # choose "MMConfig", then PCI Express MMCONFIG will be used.
+ # If you choose "Any", the kernel will try MMCONFIG, then the
+ # direct access method and falls back to the BIOS if that doesn't
+ # work. If unsure, go with the default, which is "Any".
+ #
+ #config PCI_GOBIOS
+ # bool "BIOS"
+ #
+ #config PCI_GOMMCONFIG
+ # bool "MMConfig"
+ #
+ #config PCI_GODIRECT
+ # bool "Direct"
+ #
+ #config PCI_GOANY
+ # bool "Any"
+ #
+ #endchoice
+ #
+ #config PCI_BIOS
+ # bool
+ # depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
+ # default y
+ #
+ #config PCI_DIRECT
+ # bool
+ # depends on PCI && ((PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+ # default y
+
+ config PCI_DIRECT
+ bool
+ depends on PCI
+ default y
+
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+
+ config ISA
+ bool "ISA support"
+ depends on !(X86_VOYAGER || X86_VISWS)
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+ inside your box. Other bus systems are PCI, EISA, MicroChannel
+ (MCA) or VESA. ISA is an older system, now being displaced by PCI;
+ newer boards don't support it. If you have ISA, say Y, otherwise N.
+
+ config EISA
+ bool "EISA support"
+ depends on ISA
+ ---help---
+ The Extended Industry Standard Architecture (EISA) bus was
+ developed as an open alternative to the IBM MicroChannel bus.
+
+ The EISA bus provided some of the features of the IBM MicroChannel
+ bus while maintaining backward compatibility with cards made for
+ the older ISA bus. The EISA bus saw limited use between 1988 and
+ 1995 when it was made obsolete by the PCI bus.
+
+ Say Y here if you are building a kernel for an EISA-based machine.
+
+ Otherwise, say N.
+
+ source "drivers/eisa/Kconfig"
+
+ config MCA
+ bool "MCA support"
+ depends on !(X86_VISWS || X86_VOYAGER)
+ help
+ MicroChannel Architecture is found in some IBM PS/2 machines and
+ laptops. It is a bus system similar to PCI or ISA. See
+ <file:Documentation/mca.txt> (and especially the web page given
+ there) before attempting to build an MCA bus kernel.
+
+ config MCA
+ depends on X86_VOYAGER
+ default y if X86_VOYAGER
+
+ source "drivers/mca/Kconfig"
+
+ config SCx200
+ tristate "NatSemi SCx200 support"
+ depends on !X86_VOYAGER
+ help
+ This provides basic support for the National Semiconductor SCx200
+ processor. Right now this is just a driver for the GPIO pins.
+
+ If you don't know what to do here, say N.
+
+ This support is also available as a module. If compiled as a
+ module, it will be called scx200.
+
+ source "drivers/pcmcia/Kconfig"
+
+ source "drivers/pci/hotplug/Kconfig"
+
+ endmenu
+
+ endif
+
+ menu "Kernel hacking"
+
+ config DEBUG_KERNEL
+ bool "Kernel debugging"
+ help
+ Say Y here if you are developing drivers or trying to debug and
+ identify kernel problems.
+
+ config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ default y
+ help
+ Write kernel log output directly into the VGA buffer or to a serial
+ port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+ with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+ config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+
+ config DEBUG_STACK_USAGE
+ bool "Stack utilization instrumentation"
+ depends on DEBUG_KERNEL
+ help
+ Enables the display of the minimum amount of free stack which each
+ task has ever had available in the sysrq-T and sysrq-P debug output.
+
+ This option will slow down process creation somewhat.
+
+ config DEBUG_SLAB
+ bool "Debug memory allocations"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here to have the kernel do limited verification on memory
+ allocation as well as poisoning memory on free to catch use of freed
+ memory.
+
+ config MAGIC_SYSRQ
+ bool "Magic SysRq key"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, you will have some control over the system even
+ if the system crashes for example during kernel debugging (e.g., you
+ will be able to flush the buffer cache to disk, reboot the system
+ immediately or dump some status information). This is accomplished
+ by pressing various keys while holding SysRq (Alt+PrintScreen). It
+ also works on a serial console (on PC hardware at least), if you
+ send a BREAK and then within 5 seconds a command keypress. The
+ keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
+ unless you really know what this hack does.
+
+ config DEBUG_SPINLOCK
+ bool "Spinlock debugging"
+ depends on DEBUG_KERNEL
+ help
+ Say Y here and build SMP to catch missing spinlock initialization
+ and certain other kinds of spinlock errors commonly made. This is
+ best used in conjunction with the NMI watchdog so that spinlock
+ deadlocks are also debuggable.
+
+ config DEBUG_PAGEALLOC
+ bool "Page alloc debugging"
+ depends on DEBUG_KERNEL
+ help
+ Unmap pages from the kernel linear mapping after free_pages().
+ This results in a large slowdown, but helps to find certain types
+ of memory corruptions.
+
+ config DEBUG_HIGHMEM
+ bool "Highmem debugging"
+ depends on DEBUG_KERNEL && HIGHMEM
+ help
+ This option enables additional error checking for high memory systems.
+ Disable for production systems.
+
+ config DEBUG_INFO
+ bool "Compile the kernel with debug info"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here the resulting kernel image will include
+ debugging info resulting in a larger kernel image.
+ Say Y here only if you plan to use gdb to debug the kernel.
+ If you don't debug the kernel, you can say N.
+
+ config DEBUG_SPINLOCK_SLEEP
+ bool "Sleep-inside-spinlock checking"
+ help
+ If you say Y here, various routines which may sleep will become very
+ noisy if they are called with a spinlock held.
+
+ config FRAME_POINTER
+ bool "Compile the kernel with frame pointers"
+ help
+ If you say Y here the resulting kernel image will be slightly larger
+ and slower, but it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+ to solve problems without frame pointers.
+
+ config 4KSTACKS
+ bool "Use 4Kb for kernel stacks instead of 8Kb"
+ help
+ If you say Y here the kernel will use a 4Kb stacksize for the
+ kernel stack attached to each process/thread. This facilitates
+ running more threads on a system and also reduces the pressure
+ on the VM subsystem for higher order allocations. This option
+ will also use IRQ stacks to compensate for the reduced stackspace.
+
+ config X86_FIND_SMP_CONFIG
+ bool
+ depends on X86_LOCAL_APIC || X86_VOYAGER
+ default y
+
+ config X86_MPPARSE
+ bool
+ depends on X86_LOCAL_APIC && !X86_VISWS
+ default y
+
+ endmenu
+
+ #
+ # Use the generic interrupt handling code in kernel/irq/:
+ #
+ config GENERIC_HARDIRQS
+ bool
+ default y
+
+ config GENERIC_IRQ_PROBE
+ bool
+ default y
+
+ config X86_SMP
+ bool
+ depends on SMP && !X86_VOYAGER
+ default y
+
++#config X86_HT
++# bool
++# depends on SMP && !(X86_VISWS || X86_VOYAGER)
++# default y
+
+ config X86_BIOS_REBOOT
+ bool
+ depends on !(X86_VISWS || X86_VOYAGER)
+ default y
+
+ config X86_TRAMPOLINE
+ bool
+ depends on X86_SMP || (X86_VOYAGER && SMP)
+ default y
+
+ config PC
+ bool
+ depends on X86 && !EMBEDDED
+ default y
+
+ endmenu
--- /dev/null
-c-obj-$(CONFIG_X86_SMP) += smp.o smpboot.o
-c-obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
+ #
+ # Makefile for the linux kernel.
+ #
+
+ XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+ CFLAGS += -Iarch/$(XENARCH)/kernel
+
+ extra-y := head.o init_task.o
+
+ obj-y := process.o signal.o entry.o traps.o \
+ time.o ioport.o ldt.o setup.o \
+ pci-dma.o i386_ksyms.o
+
+ c-obj-y := semaphore.o irq.o vm86.o \
+ ptrace.o sys_i386.o \
+ i387.o dmi_scan.o bootflag.o \
+ doublefault.o quirks.o
+ s-obj-y :=
+
+ obj-y += cpu/
+ obj-y += timers/
+ c-obj-$(CONFIG_ACPI_BOOT) += acpi/
+ #c-obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
+ c-obj-$(CONFIG_MCA) += mca.o
+ c-obj-$(CONFIG_X86_MSR) += msr.o
+ c-obj-$(CONFIG_X86_CPUID) += cpuid.o
+ obj-$(CONFIG_MICROCODE) += microcode.o
+ c-obj-$(CONFIG_APM) += apm.o
-c-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
++obj-$(CONFIG_X86_SMP) += smp.o smpboot.o
++#obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o
+ c-obj-$(CONFIG_X86_MPPARSE) += mpparse.o
++#obj-$(CONFIG_X86_LOCAL_APIC) += apic.o
++c-obj-$(CONFIG_X86_LOCAL_APIC) += nmi.o
+ c-obj-$(CONFIG_X86_IO_APIC) += io_apic.o
+ c-obj-$(CONFIG_X86_NUMAQ) += numaq.o
+ c-obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o
+ c-obj-$(CONFIG_MODULES) += module.o
+ c-obj-y += sysenter.o
+ obj-y += vsyscall.o
+ c-obj-$(CONFIG_ACPI_SRAT) += srat.o
+ c-obj-$(CONFIG_HPET_TIMER) += time_hpet.o
+ c-obj-$(CONFIG_EFI) += efi.o efi_stub.o
+ c-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+ EXTRA_AFLAGS := -traditional
+
+ c-obj-$(CONFIG_SCx200) += scx200.o
+
+ # vsyscall.o contains the vsyscall DSO images as __initdata.
+ # We must build both images before we can assemble it.
+ # Note: kbuild does not track this dependency due to usage of .incbin
+ $(obj)/vsyscall.o: $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so
+ targets += $(foreach F,int80 sysenter,vsyscall-$F.o vsyscall-$F.so)
+
+ # The DSO images are built using a special linker script.
+ quiet_cmd_syscall = SYSCALL $@
+ cmd_syscall = $(CC) -nostdlib -m32 $(SYSCFLAGS_$(@F)) \
+ -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+ vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1
+ SYSCFLAGS_vsyscall-sysenter.so = $(vsyscall-flags)
+ SYSCFLAGS_vsyscall-int80.so = $(vsyscall-flags)
+
+ $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
+ $(obj)/vsyscall-%.so: $(obj)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+ $(call if_changed,syscall)
+
+ # We also create a special relocatable object that should mirror the symbol
+ # table and layout of the linked DSO. With ld -R we can then refer to
+ # these symbols in the kernel code rather than hand-coded addresses.
+ extra-y += vsyscall-syms.o
+ $(obj)/built-in.o: $(obj)/vsyscall-syms.o
+ $(obj)/built-in.o: ld_flags += -R $(obj)/vsyscall-syms.o
+
+ SYSCFLAGS_vsyscall-syms.o = -r
+ $(obj)/vsyscall-syms.o: $(obj)/vsyscall.lds $(obj)/vsyscall-sysenter.o FORCE
+ $(call if_changed,syscall)
+
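+ # The c-obj-y/s-obj-y (and c-link/s-link) sources are unmodified i386 files;
+ # the pattern rule below symlinks them from arch/i386/kernel into this
+ # directory at build time.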
+ c-link := init_task.o
+ s-link := vsyscall-int80.o vsyscall-sysenter.o vsyscall-sigreturn.o
+
+ $(patsubst %.o,$(obj)/%.c,$(c-obj-y) $(c-link)) $(patsubst %.o,$(obj)/%.S,$(s-obj-y) $(s-link)):
+ @ln -fsn $(srctree)/arch/i386/kernel/$(notdir $@) $@
+
+ $(obj)/vsyscall-int80.S: $(obj)/vsyscall-sigreturn.S
+
+ obj-y += $(c-obj-y) $(s-obj-y)
+
+ clean-files += $(patsubst %.o,%.c,$(c-obj-y) $(c-obj-) $(c-link))
+ clean-files += $(patsubst %.o,%.S,$(s-obj-y) $(s-obj-) $(s-link))
--- /dev/null
- /*
- * Initialize the per-CPU GDT with the boot GDT,
- * and set up the GDT descriptor:
- */
- if (cpu) {
- cpu_gdt_descr[cpu].size = GDT_SIZE;
- cpu_gdt_descr[cpu].address = 0; /* XXXcl alloc page */
- BUG(); /* XXXcl SMP */
- memcpy((void *)cpu_gdt_descr[cpu].address,
- (void *)cpu_gdt_descr[0].address, GDT_SIZE);
- }
+ #include <linux/init.h>
+ #include <linux/string.h>
+ #include <linux/delay.h>
+ #include <linux/smp.h>
+ #include <linux/module.h>
+ #include <linux/percpu.h>
+ #include <asm/semaphore.h>
+ #include <asm/processor.h>
+ #include <asm/i387.h>
+ #include <asm/msr.h>
+ #include <asm/io.h>
+ #include <asm/mmu_context.h>
+ #ifdef CONFIG_X86_LOCAL_APIC
+ #include <asm/mpspec.h>
+ #include <asm/apic.h>
+ #include <mach_apic.h>
+ #endif
+ #include <asm-xen/hypervisor.h>
+
+ #include "cpu.h"
+
+ DEFINE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
+ EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
+
+ static int cachesize_override __initdata = -1;
+ static int disable_x86_fxsr __initdata = 0;
+ static int disable_x86_serial_nr __initdata = 1;
+
+ struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+
+ extern void mcheck_init(struct cpuinfo_x86 *c);
+
+ extern void machine_specific_modify_cpu_capabilities(struct cpuinfo_x86 *c);
+
+ extern int disable_pse;
+
+ static void default_init(struct cpuinfo_x86 * c)
+ {
+ /* Not much we can do here... */
+ /* Check if at least it has cpuid */
+ if (c->cpuid_level == -1) {
+ /* No cpuid. It must be an ancient CPU */
+ if (c->x86 == 4)
+ strcpy(c->x86_model_id, "486");
+ else if (c->x86 == 3)
+ strcpy(c->x86_model_id, "386");
+ }
+ }
+
+ static struct cpu_dev default_cpu = {
+ .c_init = default_init,
+ };
+ static struct cpu_dev * this_cpu = &default_cpu;
+
+ static int __init cachesize_setup(char *str)
+ {
+ get_option (&str, &cachesize_override);
+ return 1;
+ }
+ __setup("cachesize=", cachesize_setup);
+
+ int __init get_model_name(struct cpuinfo_x86 *c)
+ {
+ unsigned int *v;
+ char *p, *q;
+
+ if (cpuid_eax(0x80000000) < 0x80000004)
+ return 0;
+
+ v = (unsigned int *) c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+
+ /* Intel chips right-justify this string for some dumb reason;
+ undo that brain damage */
+ p = q = &c->x86_model_id[0];
+ while ( *p == ' ' )
+ p++;
+ if ( p != q ) {
+ while ( *p )
+ *q++ = *p++;
+ while ( q <= &c->x86_model_id[48] )
+ *q++ = '\0'; /* Zero-pad the rest */
+ }
+
+ return 1;
+ }
+
+
+ void __init display_cacheinfo(struct cpuinfo_x86 *c)
+ {
+ unsigned int n, dummy, ecx, edx, l2size;
+
+ n = cpuid_eax(0x80000000);
+
+ if (n >= 0x80000005) {
+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ c->x86_cache_size=(ecx>>24)+(edx>>24);
+ }
+
+ if (n < 0x80000006) /* Some chips just have a large L1. */
+ return;
+
+ ecx = cpuid_ecx(0x80000006);
+ l2size = ecx >> 16;
+
+ /* do processor-specific cache resizing */
+ if (this_cpu->c_size_cache)
+ l2size = this_cpu->c_size_cache(c,l2size);
+
+ /* Allow user to override all this if necessary. */
+ if (cachesize_override != -1)
+ l2size = cachesize_override;
+
+ if ( l2size == 0 )
+ return; /* Again, no L2 cache is possible */
+
+ c->x86_cache_size = l2size;
+
+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
+ }
+
+ /* Naming convention should be: <Name> [(<Codename>)] */
+ /* This table is only used if init_<vendor>() below doesn't set it; */
+ /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
+
+ /* Look up CPU names by table lookup. */
+ static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+ {
+ struct cpu_model_info *info;
+
+ if ( c->x86_model >= 16 )
+ return NULL; /* Range check */
+
+ if (!this_cpu)
+ return NULL;
+
+ info = this_cpu->c_models;
+
+ while (info && info->family) {
+ if (info->family == c->x86)
+ return info->model_names[c->x86_model];
+ info++;
+ }
+ return NULL; /* Not found */
+ }
+
+
+ void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+ {
+ char *v = c->x86_vendor_id;
+ int i;
+
+ for (i = 0; i < X86_VENDOR_NUM; i++) {
+ if (cpu_devs[i]) {
+ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
+ (cpu_devs[i]->c_ident[1] &&
+ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
+ c->x86_vendor = i;
+ if (!early)
+ this_cpu = cpu_devs[i];
+ break;
+ }
+ }
+ }
+ }
+
+
+ static int __init x86_fxsr_setup(char * s)
+ {
+ disable_x86_fxsr = 1;
+ return 1;
+ }
+ __setup("nofxsr", x86_fxsr_setup);
+
+
+ /* Standard macro to see if a specific flag is changeable */
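+ /* Toggle the requested bit in a pushed copy of EFLAGS, reload it, and read
+ EFLAGS back: if the bit sticks, the flag is changeable. Used below to
+ probe the ID flag, i.e. CPUID support. */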
+ static inline int flag_is_changeable_p(u32 flag)
+ {
+ u32 f1, f2;
+
+ asm("pushfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "movl %0,%1\n\t"
+ "xorl %2,%0\n\t"
+ "pushl %0\n\t"
+ "popfl\n\t"
+ "pushfl\n\t"
+ "popl %0\n\t"
+ "popfl\n\t"
+ : "=&r" (f1), "=&r" (f2)
+ : "ir" (flag));
+
+ return ((f1^f2) & flag) != 0;
+ }
+
+
+ /* Probe for the CPUID instruction */
+ int __init have_cpuid_p(void)
+ {
+ return flag_is_changeable_p(X86_EFLAGS_ID);
+ }
+
+ /* Do minimum CPU detection early.
+ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+ The others are not touched to avoid unwanted side effects. */
+ void __init early_cpu_detect(void)
+ {
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ c->x86_cache_alignment = 32;
+
+ if (!have_cpuid_p())
+ return;
+
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c, 1);
+
+ c->x86 = 4;
+ if (c->cpuid_level >= 0x00000001) {
+ u32 junk, tfms, cap0, misc;
+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
+ if (cap0 & (1<<19))
+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
+ }
+
+ early_intel_workaround(c);
+ }
+
+ void __init generic_identify(struct cpuinfo_x86 * c)
+ {
+ u32 tfms, xlvl;
+ int junk;
+
+ if (have_cpuid_p()) {
+ /* Get vendor name */
+ cpuid(0x00000000, &c->cpuid_level,
+ (int *)&c->x86_vendor_id[0],
+ (int *)&c->x86_vendor_id[8],
+ (int *)&c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c, 0);
+ /* Initialize the standard set of capabilities */
+ /* Note that the vendor-specific code below might override */
+
+ /* Intel-defined flags: level 0x00000001 */
+ if ( c->cpuid_level >= 0x00000001 ) {
+ u32 capability, excap;
+ cpuid(0x00000001, &tfms, &junk, &excap, &capability);
+ c->x86_capability[0] = capability;
+ c->x86_capability[4] = excap;
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf) {
+ c->x86 += (tfms >> 20) & 0xff;
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ }
+ c->x86_mask = tfms & 15;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+ }
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
+ if ( xlvl >= 0x80000001 ) {
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[6] = cpuid_ecx(0x80000001);
+ }
+ if ( xlvl >= 0x80000004 )
+ get_model_name(c); /* Default name */
+ }
+ }
+ }
+
+ static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+ {
+ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
+ /* Disable processor serial number */
+ unsigned long lo,hi;
+ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+ lo |= 0x200000;
+ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
+ printk(KERN_NOTICE "CPU serial number disabled.\n");
+ clear_bit(X86_FEATURE_PN, c->x86_capability);
+
+ /* Disabling the serial number may affect the cpuid level */
+ c->cpuid_level = cpuid_eax(0);
+ }
+ }
+
+ static int __init x86_serial_nr_setup(char *s)
+ {
+ disable_x86_serial_nr = 0;
+ return 1;
+ }
+ __setup("serialnumber", x86_serial_nr_setup);
+
+
+
+ /*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+ void __init identify_cpu(struct cpuinfo_x86 *c)
+ {
+ int i;
+
+ c->loops_per_jiffy = loops_per_jiffy;
+ c->x86_cache_size = -1;
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+ c->cpuid_level = -1; /* CPUID not detected */
+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
+ c->x86_vendor_id[0] = '\0'; /* Unset */
+ c->x86_model_id[0] = '\0'; /* Unset */
+ c->x86_num_cores = 1;
+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
+
+ if (!have_cpuid_p()) {
+ /* First of all, decide if this is a 486 or higher */
+ /* It's a 486 if we can modify the AC flag */
+ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
+ c->x86 = 4;
+ else
+ c->x86 = 3;
+ }
+
+ generic_identify(c);
+
+ printk(KERN_DEBUG "CPU: After generic identify, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
+
+ if (this_cpu->c_identify) {
+ this_cpu->c_identify(c);
+
+ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
+ }
+
+ /*
+ * Vendor-specific initialization. In this section we
+ * canonicalize the feature flags, meaning if there are
+ * features a certain CPU supports which CPUID doesn't
+ * tell us, CPUID claiming incorrect flags, or other bugs,
+ * we handle them here.
+ *
+ * At the end of this section, c->x86_capability better
+ * indicate the features this CPU genuinely supports!
+ */
+ if (this_cpu->c_init)
+ this_cpu->c_init(c);
+
+ /* Disable the PN if appropriate */
+ squash_the_stupid_serial_number(c);
+
+ /*
+ * The vendor-specific functions might have changed features. Now
+ * we do "generic changes."
+ */
+
+ /* TSC disabled? */
+ if ( tsc_disable )
+ clear_bit(X86_FEATURE_TSC, c->x86_capability);
+
+ /* FXSR disabled? */
+ if (disable_x86_fxsr) {
+ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
+ clear_bit(X86_FEATURE_XMM, c->x86_capability);
+ }
+
+ if (disable_pse)
+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
+
+ /* If the model name is still unset, do table lookup. */
+ if ( !c->x86_model_id[0] ) {
+ char *p;
+ p = table_lookup_model(c);
+ if ( p )
+ strcpy(c->x86_model_id, p);
+ else
+ /* Last resort... */
+ sprintf(c->x86_model_id, "%02x/%02x",
+ c->x86_vendor, c->x86_model);
+ }
+
+ machine_specific_modify_cpu_capabilities(c);
+
+ /* Now the feature flags better reflect actual CPU features! */
+
+ printk(KERN_DEBUG "CPU: After all inits, caps:");
+ for (i = 0; i < NCAPINTS; i++)
+ printk(" %08lx", c->x86_capability[i]);
+ printk("\n");
+
+ /*
+ * On SMP, boot_cpu_data holds the common feature set between
+ * all CPUs; so make sure that we indicate which features are
+ * common between the CPUs. The first time this routine gets
+ * executed, c == &boot_cpu_data.
+ */
+ if ( c != &boot_cpu_data ) {
+ /* AND the already accumulated flags with these */
+ for ( i = 0 ; i < NCAPINTS ; i++ )
+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
+ }
+
+ /* Init Machine Check Exception if available. */
+ #ifdef CONFIG_X86_MCE
+ mcheck_init(c);
+ #endif
+ }
+ /*
+ * Perform early boot up checks for a valid TSC. See arch/i386/kernel/time.c
+ */
+
+ void __init dodgy_tsc(void)
+ {
+ if (( boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX ) ||
+ ( boot_cpu_data.x86_vendor == X86_VENDOR_NSC ))
+ cpu_devs[X86_VENDOR_CYRIX]->c_init(&boot_cpu_data);
+ }
+
+ #ifdef CONFIG_X86_HT
+ void __init detect_ht(struct cpuinfo_x86 *c)
+ {
+ u32 eax, ebx, ecx, edx;
+ int index_lsb, index_msb, tmp;
+ int cpu = smp_processor_id();
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+ return;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
+ if (smp_num_siblings == 1) {
+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
+ } else if (smp_num_siblings > 1 ) {
+ index_lsb = 0;
+ index_msb = 31;
+
+ if (smp_num_siblings > NR_CPUS) {
+ printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
+ smp_num_siblings = 1;
+ return;
+ }
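+ /* index_msb ends up as ceil(log2(smp_num_siblings)): the number of low
+ APIC-ID bits that name a sibling, so phys_pkg_id() can shift them away
+ to obtain the physical package ID. */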
+ tmp = smp_num_siblings;
+ while ((tmp & 1) == 0) {
+ tmp >>=1 ;
+ index_lsb++;
+ }
+ tmp = smp_num_siblings;
+ while ((tmp & 0x80000000 ) == 0) {
+ tmp <<=1 ;
+ index_msb--;
+ }
+ if (index_lsb != index_msb )
+ index_msb++;
+ phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+
+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
+ }
+ }
+ #endif
+
+ void __init print_cpu_info(struct cpuinfo_x86 *c)
+ {
+ char *vendor = NULL;
+
+ if (c->x86_vendor < X86_VENDOR_NUM)
+ vendor = this_cpu->c_vendor;
+ else if (c->cpuid_level >= 0)
+ vendor = c->x86_vendor_id;
+
+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
+ printk("%s ", vendor);
+
+ if (!c->x86_model_id[0])
+ printk("%d86", c->x86);
+ else
+ printk("%s", c->x86_model_id);
+
+ if (c->x86_mask || c->cpuid_level >= 0)
+ printk(" stepping %02x\n", c->x86_mask);
+ else
+ printk("\n");
+ }
+
+ cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+ /* This is hacky. :)
+ * We're emulating future behavior.
+ * In the future, the cpu-specific init functions will be called implicitly
+ * via the magic of initcalls.
+ * They will insert themselves into the cpu_devs structure.
+ * Then, when cpu_init() is called, we can just iterate over that array.
+ */
+
+ extern int intel_cpu_init(void);
+ extern int cyrix_init_cpu(void);
+ extern int nsc_init_cpu(void);
+ extern int amd_init_cpu(void);
+ extern int centaur_init_cpu(void);
+ extern int transmeta_init_cpu(void);
+ extern int rise_init_cpu(void);
+ extern int nexgen_init_cpu(void);
+ extern int umc_init_cpu(void);
+ void early_cpu_detect(void);
+
+ void __init early_cpu_init(void)
+ {
+ intel_cpu_init();
+ cyrix_init_cpu();
+ nsc_init_cpu();
+ amd_init_cpu();
+ centaur_init_cpu();
+ transmeta_init_cpu();
+ rise_init_cpu();
+ nexgen_init_cpu();
+ umc_init_cpu();
+ early_cpu_detect();
+
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ /* pse is not compatible with on-the-fly unmapping,
+ * disable it even if the cpus claim to support it.
+ */
+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+ disable_pse = 1;
+ #endif
+ }
+
+ void __init cpu_gdt_init(struct Xgt_desc_struct *gdt_descr)
+ {
+ unsigned long frames[gdt_descr->size >> PAGE_SHIFT];
+ unsigned long va;
+ int f;
+
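+ /* Xen expects the GDT as a list of machine frame numbers, and the frames
+ must be mapped read-only before HYPERVISOR_set_gdt will accept them. */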
+ for (va = gdt_descr->address, f = 0;
+ va < gdt_descr->address + gdt_descr->size;
+ va += PAGE_SIZE, f++) {
+ frames[f] = virt_to_machine(va) >> PAGE_SHIFT;
+ make_page_readonly((void *)va);
+ }
+ flush_page_update_queue();
+ if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
+ BUG();
+ lgdt_finish();
+ }
+
+ /*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+ void __init cpu_init (void)
+ {
+ int cpu = smp_processor_id();
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
+ struct thread_struct *thread = &current->thread;
+
+ if (cpu_test_and_set(cpu, cpu_initialized)) {
+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+ for (;;) local_irq_enable();
+ }
+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+ if (cpu_has_vme || cpu_has_de)
+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+ if (tsc_disable && cpu_has_tsc) {
+ printk(KERN_NOTICE "Disabling TSC...\n");
+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+ set_in_cr4(X86_CR4_TSD);
+ }
+
+ /*
+ * Set up the per-thread TLS descriptor cache:
+ */
+ memcpy(thread->tls_array, &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN],
+ GDT_ENTRY_TLS_ENTRIES * 8);
+
+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
+
+ /*
+ * Delete NT
+ */
+ __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
+
+ /*
+ * Set up and load the per-CPU TSS and LDT
+ */
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ if (current->mm)
+ BUG();
+ enter_lazy_tlb(&init_mm, current);
+
+ load_esp0(t, thread);
+
+ load_LDT(&init_mm.context);
+ flush_page_update_queue();
+
+ /* Clear %fs and %gs. */
+ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
+
+ /* Clear all 6 debug registers: */
+
+ #define CD(register) HYPERVISOR_set_debugreg(register, 0)
+
+ CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);
+
+ #undef CD
+
+ /*
+ * Force FPU initialization:
+ */
+ current_thread_info()->status = 0;
+ clear_used_math();
+ mxcsr_feature_mask_init();
+ }
++
++
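++/* Out-of-line, callable wrapper around the smp_processor_id() macro. */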
++int get_smp_processor_id(void)
++{
++ return smp_processor_id();
++}
--- /dev/null
-#define XEN_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
+ /*
+ * linux/arch/i386/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+ /*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
+ * on a 486.
+ *
+ * Stack layout in 'ret_from_system_call':
+ * ptrace needs to have all regs on the stack.
+ * if the order here is changed, it needs to be
+ * updated in fork.c:copy_process, signal.c:do_signal,
+ * ptrace.c and ptrace.h
+ *
+ * 0(%esp) - %ebx
+ * 4(%esp) - %ecx
+ * 8(%esp) - %edx
+ * C(%esp) - %esi
+ * 10(%esp) - %edi
+ * 14(%esp) - %ebp
+ * 18(%esp) - %eax
+ * 1C(%esp) - %ds
+ * 20(%esp) - %es
+ * 24(%esp) - orig_eax
+ * 28(%esp) - %eip
+ * 2C(%esp) - %cs
+ * 30(%esp) - %eflags
+ * 34(%esp) - %oldesp
+ * 38(%esp) - %oldss
+ *
+ * "current" is in register %ebx during any slow entries.
+ */
+
+ #include <linux/config.h>
+ #include <linux/linkage.h>
+ #include <asm/thread_info.h>
+ #include <asm/errno.h>
+ #include <asm/segment.h>
+ #include <asm/smp.h>
+ #include <asm/page.h>
+ #include "irq_vectors.h"
+ #include <asm-xen/xen-public/xen.h>
+
+ #define nr_syscalls ((syscall_table_size)/4)
+
+ EBX = 0x00
+ ECX = 0x04
+ EDX = 0x08
+ ESI = 0x0C
+ EDI = 0x10
+ EBP = 0x14
+ EAX = 0x18
+ DS = 0x1C
+ ES = 0x20
+ ORIG_EAX = 0x24
+ EIP = 0x28
+ CS = 0x2C
+ EVENT_MASK = 0x2E
+ EFLAGS = 0x30
+ OLDESP = 0x34
+ OLDSS = 0x38
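+ # EVENT_MASK (0x2E) occupies the spare upper bytes of the 4-byte slot that
+ # holds the 16-bit saved %cs at 0x2C, so the saved upcall mask is carried
+ # inside the register frame.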
+
+ CF_MASK = 0x00000001
+ TF_MASK = 0x00000100
+ IF_MASK = 0x00000200
+ DF_MASK = 0x00000400
+ NT_MASK = 0x00004000
+ VM_MASK = 0x00020000
+
+ /* Offsets into shared_info_t. */
+ #define evtchn_upcall_pending /* 0 */
+ #define evtchn_upcall_mask 1
+
++#define sizeof_vcpu_shift 3
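++/* Each VCPU has an 8-byte record at the start of shared_info holding its
++ upcall_pending/upcall_mask bytes; shifting the CPU index left by
++ sizeof_vcpu_shift and adding HYPERVISOR_shared_info selects this CPU's
++ record, to which the offsets above apply. */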
++
++#ifdef CONFIG_SMP
++#define XEN_GET_VCPU_INFO(reg)
++#define preempt_disable(reg) incl TI_preempt_count(reg)
++#define preempt_enable(reg) decl TI_preempt_count(reg)
++#define XEN_LOCK_VCPU_INFO_SMP(reg) preempt_disable(%ebp) ; \
++ movl TI_cpu(%ebp),reg ; \
++ shl $sizeof_vcpu_shift,reg ; \
++ addl HYPERVISOR_shared_info,reg
++#define XEN_UNLOCK_VCPU_INFO_SMP(reg) preempt_enable(%ebp)
++#define XEN_UNLOCK_VCPU_INFO_SMP_fixup .byte 0xff,0xff,0xff
++#define Ux00 0xff
++#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
++ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
++ XEN_UNLOCK_VCPU_INFO_SMP(reg)
++#define XEN_UNBLOCK_EVENTS(reg) XEN_LOCK_VCPU_INFO_SMP(reg) ; \
++ movb $0,evtchn_upcall_mask(reg) ; \
++ XEN_UNLOCK_VCPU_INFO_SMP(reg)
++#define XEN_SAVE_UPCALL_MASK(reg,tmp,off) GET_THREAD_INFO(%ebp) ; \
++ XEN_LOCK_VCPU_INFO_SMP(reg) ; \
++ movb evtchn_upcall_mask(reg), tmp ; \
++ movb tmp, off(%esp) ; \
++ XEN_UNLOCK_VCPU_INFO_SMP(reg)
++#else
+ #define XEN_GET_VCPU_INFO(reg) movl HYPERVISOR_shared_info,reg
-14: XEN_BLOCK_EVENTS(%esi)
++#define XEN_LOCK_VCPU_INFO_SMP(reg)
++#define XEN_UNLOCK_VCPU_INFO_SMP(reg)
++#define XEN_UNLOCK_VCPU_INFO_SMP_fixup
++#define Ux00 0x00
++#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg) XEN_LOCKED_BLOCK_EVENTS(reg)
+ #define XEN_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
+ #define XEN_SAVE_UPCALL_MASK(reg,tmp,off) \
+ movb evtchn_upcall_mask(reg), tmp; \
+ movb tmp, off(%esp)
++#endif
+
+ #define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+
+ #ifdef CONFIG_PREEMPT
+ #define preempt_stop XEN_BLOCK_EVENTS(%esi)
+ #else
+ #define preempt_stop
+ #define resume_kernel restore_all
+ #endif
+
+ #define SAVE_ALL_NO_EVENTMASK \
+ cld; \
+ pushl %es; \
+ pushl %ds; \
+ pushl %eax; \
+ pushl %ebp; \
+ pushl %edi; \
+ pushl %esi; \
+ pushl %edx; \
+ pushl %ecx; \
+ pushl %ebx; \
+ movl $(__USER_DS), %edx; \
+ movl %edx, %ds; \
+ movl %edx, %es;
+
+ #define SAVE_ALL \
+ SAVE_ALL_NO_EVENTMASK; \
+ XEN_GET_VCPU_INFO(%esi); \
+ XEN_SAVE_UPCALL_MASK(%esi,%dl,EVENT_MASK)
+
+ #define RESTORE_INT_REGS \
+ popl %ebx; \
+ popl %ecx; \
+ popl %edx; \
+ popl %esi; \
+ popl %edi; \
+ popl %ebp; \
+ popl %eax
+
+ #define RESTORE_REGS \
+ RESTORE_INT_REGS; \
+ 1: popl %ds; \
+ 2: popl %es; \
+ .section .fixup,"ax"; \
+ 3: movl $0,(%esp); \
+ jmp 1b; \
+ 4: movl $0,(%esp); \
+ jmp 2b; \
+ .previous; \
+ .section __ex_table,"a";\
+ .align 4; \
+ .long 1b,3b; \
+ .long 2b,4b; \
+ .previous
+
+
+ #define RESTORE_ALL \
+ RESTORE_REGS \
+ addl $4, %esp; \
+ 1: iret; \
+ .section .fixup,"ax"; \
+ 2: movl $(__USER_DS), %edx; \
+ movl %edx, %ds; \
+ movl %edx, %es; \
+ movl $11,%eax; \
+ call do_exit; \
+ .previous; \
+ .section __ex_table,"a";\
+ .align 4; \
+ .long 1b,2b; \
+ .previous
+
+
+ ENTRY(ret_from_fork)
+ pushl %eax
+ call schedule_tail
+ GET_THREAD_INFO(%ebp)
+ popl %eax
+ XEN_GET_VCPU_INFO(%esi)
+ jmp syscall_exit
+
+ /*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+ # userspace resumption stub bypassing syscall exit tracing
+ ALIGN
+ ret_from_exception:
+ preempt_stop
+ ret_from_intr:
+ GET_THREAD_INFO(%ebp)
+ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
+ movb CS(%esp), %al
+ testl $(VM_MASK | 2), %eax
+ jz resume_kernel # returning to kernel or vm86-space
+ ENTRY(resume_userspace)
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
+ # int/exception return?
+ jne work_pending
+ jmp restore_all
+
+ #ifdef CONFIG_PREEMPT
+ ENTRY(resume_kernel)
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_BLOCK_EVENTS(%esi)
+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
+ jnz restore_all
+ need_resched:
+ movl TI_flags(%ebp), %ecx # need_resched set ?
+ testb $_TIF_NEED_RESCHED, %cl
+ jz restore_all
+ testb $0xFF,EVENT_MASK(%esp) # interrupts off (exception path) ?
+ jnz restore_all
+ call preempt_schedule_irq
+ jmp need_resched
+ #endif
+
+ /* SYSENTER_RETURN points to after the "sysenter" instruction in
+ the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
+
+ # sysenter call handler stub
+ ENTRY(sysenter_entry)
+ movl TSS_sysenter_esp0(%esp),%esp
+ sysenter_past_esp:
+ sti
+ pushl $(__USER_DS)
+ pushl %ebp
+ pushfl
+ pushl $(__USER_CS)
+ pushl $SYSENTER_RETURN
+
+ /*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
+ cmpl $__PAGE_OFFSET-3,%ebp
+ jae syscall_fault
+ 1: movl (%ebp),%ebp
+ .section __ex_table,"a"
+ .align 4
+ .long 1b,syscall_fault
+ .previous
+
+ pushl %eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+ jnz syscall_trace_entry
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
+ call *sys_call_table(,%eax,4)
+ movl %eax,EAX(%esp)
+ cli
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx
+ jne syscall_exit_work
+ /* if something modifies registers it must also disable sysexit */
+ movl EIP(%esp), %edx
+ movl OLDESP(%esp), %ecx
+ xorl %ebp,%ebp
+ sti
+ sysexit
+
+
+ # system call handler stub
+ ENTRY(system_call)
+ pushl %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+ # system call tracing in operation
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+ jnz syscall_trace_entry
+ cmpl $(nr_syscalls), %eax
+ jae syscall_badsys
+ syscall_call:
+ call *sys_call_table(,%eax,4)
+ movl %eax,EAX(%esp) # store the return value
+ syscall_exit:
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx # current->work
+ jne syscall_exit_work
+ restore_all:
+ testl $VM_MASK, EFLAGS(%esp)
+ jnz resume_vm86
+ movb EVENT_MASK(%esp), %al
+ notb %al # %al == ~saved_mask
++ XEN_LOCK_VCPU_INFO_SMP(%esi)
+ andb evtchn_upcall_mask(%esi),%al
+ andb $1,%al # %al == mask & ~saved_mask
+ jnz restore_all_enable_events # != 0 => reenable event delivery
++ XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+ RESTORE_ALL
+
+ resume_vm86:
+ XEN_UNBLOCK_EVENTS(%esi)
+ RESTORE_REGS
+ movl %eax,(%esp)
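+ # int $0x82 is the Xen hypercall trap; __HYPERVISOR_switch_vm86 asks the
+ # hypervisor to perform the vm86 transition.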
+ movl $__HYPERVISOR_switch_vm86,%eax
+ int $0x82
+ ud2
+
+ # perform work that needs to be done immediately before resumption
+ ALIGN
+ work_pending:
+ testb $_TIF_NEED_RESCHED, %cl
+ jz work_notifysig
+ work_resched:
+ call schedule
+ XEN_BLOCK_EVENTS(%esi) # make sure we don't miss an interrupt
+ # setting need_resched or sigpending
+ # between sampling and the iret
+ movl TI_flags(%ebp), %ecx
+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
+ # than syscall tracing?
+ jz restore_all
+ testb $_TIF_NEED_RESCHED, %cl
+ jnz work_resched
+
+ work_notifysig: # deal with pending signals and
+ # notify-resume requests
+ testl $VM_MASK, EFLAGS(%esp)
+ movl %esp, %eax
+ jne work_notifysig_v86 # returning to kernel-space or
+ # vm86-space
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp restore_all
+
+ ALIGN
+ work_notifysig_v86:
+ pushl %ecx # save ti_flags for do_notify_resume
+ call save_v86_state # %eax contains pt_regs pointer
+ popl %ecx
+ movl %eax, %esp
+ xorl %edx, %edx
+ call do_notify_resume
+ jmp restore_all
+
+ # perform syscall exit tracing
+ ALIGN
+ syscall_trace_entry:
+ movl $-ENOSYS,EAX(%esp)
+ movl %esp, %eax
+ xorl %edx,%edx
+ call do_syscall_trace
+ movl ORIG_EAX(%esp), %eax
+ cmpl $(nr_syscalls), %eax
+ jnae syscall_call
+ jmp syscall_exit
+
+ # perform syscall exit tracing
+ ALIGN
+ syscall_exit_work:
+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
+ jz work_pending
+ XEN_UNBLOCK_EVENTS(%esi) # could let do_syscall_trace() call
+ # schedule() instead
+ movl %esp, %eax
+ movl $1, %edx
+ call do_syscall_trace
+ jmp resume_userspace
+
+ ALIGN
+ syscall_fault:
+ pushl %eax # save orig_eax
+ SAVE_ALL
+ GET_THREAD_INFO(%ebp)
+ movl $-EFAULT,EAX(%esp)
+ jmp resume_userspace
+
+ ALIGN
+ syscall_badsys:
+ movl $-ENOSYS,EAX(%esp)
+ jmp resume_userspace
+
+ #if 0 /* XEN */
+ /*
+ * Build the entry stubs and pointer table with
+ * some assembler magic.
+ */
+ .data
+ ENTRY(interrupt)
+ .text
+
+ vector=0
+ ENTRY(irq_entries_start)
+ .rept NR_IRQS
+ ALIGN
+ 1: pushl $vector-256
+ jmp common_interrupt
+ .data
+ .long 1b
+ .text
+ vector=vector+1
+ .endr
+
+ ALIGN
+ common_interrupt:
+ SAVE_ALL
+ movl %esp,%eax
+ call do_IRQ
+ jmp ret_from_intr
+
+ #define BUILD_INTERRUPT(name, nr) \
+ ENTRY(name) \
+ pushl $nr-256; \
+ SAVE_ALL \
+ movl %esp,%eax; \
+ call smp_/**/name; \
+ jmp ret_from_intr;
+
+ /* The include is where all of the SMP etc. interrupts come from */
+ #include "entry_arch.h"
+ #endif /* XEN */
+
+ ENTRY(divide_error)
+ pushl $0 # no error code
+ pushl $do_divide_error
+ ALIGN
+ error_code:
+ pushl %ds
+ pushl %eax
+ xorl %eax, %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax # eax = -1
+ pushl %ecx
+ pushl %ebx
+ cld
+ movl %es, %ecx
+ movl ES(%esp), %edi # get the function address
+ movl ORIG_EAX(%esp), %edx # get the error code
+ movl %eax, ORIG_EAX(%esp)
+ movl %ecx, ES(%esp)
+ movl $(__USER_DS), %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ movl %esp,%eax # pt_regs pointer
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
+ call *%edi
+ jmp ret_from_exception
+
+ # A note on the "critical region" in our callback handler.
+ # We want to avoid stacking callback handlers due to events occurring
+ # during handling of the last event. To do this, we keep events disabled
+ # until we've done all processing. HOWEVER, we must enable events before
+ # popping the stack frame (can't be done atomically) and so it would still
+ # be possible to get enough handler activations to overflow the stack.
+ # Although unlikely, bugs of that kind are hard to track down, so we'd
+ # like to avoid the possibility.
+ # So, on entry to the handler we detect whether we interrupted an
+ # existing activation in its critical region -- if so, we pop the current
+ # activation and restart the handler using the previous one.
+ ENTRY(hypervisor_callback)
+ pushl %eax
+ SAVE_ALL_NO_EVENTMASK
+ movl EIP(%esp),%eax
+ cmpl $scrit,%eax
+ jb 11f
+ cmpl $ecrit,%eax
+ jb critical_region_fixup
+ 11: XEN_GET_VCPU_INFO(%esi)
+ movb $0, EVENT_MASK(%esp)
+ push %esp
+ call evtchn_do_upcall
+ add $4,%esp
+ jmp ret_from_intr
+
+ ALIGN
+ restore_all_enable_events:
+ XEN_UNBLOCK_EVENTS(%esi)
+ scrit: /**** START OF CRITICAL REGION ****/
+ XEN_TEST_PENDING(%esi)
+ jnz 14f # process more events if necessary...
++ XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+ RESTORE_ALL
- mov %esp,%esi
++14: XEN_LOCKED_BLOCK_EVENTS(%esi)
++ XEN_UNLOCK_VCPU_INFO_SMP(%esi)
+ jmp 11b
+ ecrit: /**** END OF CRITICAL REGION ****/
+ # [How we do the fixup]. We want to merge the current stack frame with the
+ # just-interrupted frame. How we do this depends on where in the critical
+ # region the interrupted handler was executing, and so how many saved
+ # registers are in each frame. We do this quickly using the lookup table
+ # 'critical_fixup_table'. For each byte offset in the critical region, it
+ # provides the number of bytes which have already been popped from the
+ # interrupted stack frame.
+ critical_region_fixup:
+ addl $critical_fixup_table-scrit,%eax
+ movzbl (%eax),%eax # %eax contains num bytes popped
- je 16f # skip loop if nothing to copy
-15: subl $4,%esi # pre-decrementing copy loop
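++# On SMP a fixup-table entry of Ux00 (0xff) marks an offset at which the
++# vcpu_info lock (a preempt_disable) was still held: release it here and
++# treat the entry as zero bytes popped.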
++#ifdef CONFIG_SMP
++ cmpb $0xff,%al
++ jne 15f
++ add $1,%al
++ GET_THREAD_INFO(%ebp)
++ XEN_UNLOCK_VCPU_INFO_SMP(%esi)
++15:
++#endif
++ mov %esp,%esi
+ add %eax,%esi # %esi points at end of src region
+ mov %esp,%edi
+ add $0x34,%edi # %edi points at end of dst region
+ mov %eax,%ecx
+ shr $2,%ecx # convert bytes to words
- loop 15b
-16: movl %edi,%esp # final %edi is top of merged stack
++ je 17f # skip loop if nothing to copy
++16: subl $4,%esi # pre-decrementing copy loop
+ subl $4,%edi
+ movl (%esi),%eax
+ movl %eax,(%edi)
- .byte 0x00,0x00,0x00 # testb $0xff,(%esi) = XEN_TEST_PENDING
- .byte 0x00,0x00 # jnz 14f
++ loop 16b
++17: movl %edi,%esp # final %edi is top of merged stack
+ jmp 11b
+
+ critical_fixup_table:
- .byte 0x00,0x00,0x00,0x00 # movb $1,1(%esi)
++ .byte Ux00,Ux00,Ux00 # testb $0xff,(%esi) = XEN_TEST_PENDING
++ .byte Ux00,Ux00 # jnz 14f
++ XEN_UNLOCK_VCPU_INFO_SMP_fixup
+ .byte 0x00 # pop %ebx
+ .byte 0x04 # pop %ecx
+ .byte 0x08 # pop %edx
+ .byte 0x0c # pop %esi
+ .byte 0x10 # pop %edi
+ .byte 0x14 # pop %ebp
+ .byte 0x18 # pop %eax
+ .byte 0x1c # pop %ds
+ .byte 0x20 # pop %es
+ .byte 0x24,0x24,0x24 # add $4,%esp
+ .byte 0x28 # iret
++ .byte Ux00,Ux00,Ux00,Ux00 # movb $1,1(%esi)
++ XEN_UNLOCK_VCPU_INFO_SMP_fixup
+ .byte 0x00,0x00 # jmp 11b
+
+ # Hypervisor uses this for application faults while it executes.
+ ENTRY(failsafe_callback)
+ 1: popl %ds
+ 2: popl %es
+ 3: popl %fs
+ 4: popl %gs
+ subl $4,%esp
+ SAVE_ALL
+ jmp ret_from_exception
+ .section .fixup,"ax"; \
+ 6: movl $0,(%esp); \
+ jmp 1b; \
+ 7: movl $0,(%esp); \
+ jmp 2b; \
+ 8: movl $0,(%esp); \
+ jmp 3b; \
+ 9: movl $0,(%esp); \
+ jmp 4b; \
+ .previous; \
+ .section __ex_table,"a";\
+ .align 4; \
+ .long 1b,6b; \
+ .long 2b,7b; \
+ .long 3b,8b; \
+ .long 4b,9b; \
+ .previous
+
+ ENTRY(coprocessor_error)
+ pushl $0
+ pushl $do_coprocessor_error
+ jmp error_code
+
+ ENTRY(simd_coprocessor_error)
+ pushl $0
+ pushl $do_simd_coprocessor_error
+ jmp error_code
+
+ ENTRY(device_not_available)
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ preempt_stop
+ call math_state_restore
+ jmp ret_from_exception
+
+ /*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+ * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+ * We just load the right stack, and push the three (known) values
+ * by hand onto the new stack - while updating the return eip past
+ * the instruction that would have done it for sysenter.
+ */
+ #define FIX_STACK(offset, ok, label) \
+ cmpw $__KERNEL_CS,4(%esp); \
+ jne ok; \
+ label: \
+ movl TSS_sysenter_esp0+offset(%esp),%esp; \
+ pushfl; \
+ pushl $__KERNEL_CS; \
+ pushl $sysenter_past_esp
+
+ ENTRY(debug)
+ cmpl $sysenter_entry,(%esp)
+ jne debug_stack_correct
+ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+ debug_stack_correct:
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ xorl %edx,%edx # error code 0
+ movl %esp,%eax # pt_regs pointer
+ call do_debug
+ testl %eax,%eax
+ jnz restore_all
+ jmp ret_from_exception
+
+ #if 0 /* XEN */
+ /*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+ * a debug fault, and the debug fault hasn't yet been able to
+ * clear up the stack. So we first check whether we got an
+ * NMI on the sysenter entry path, but after that we need to
+ * check whether we got an NMI on the debug path where the debug
+ * fault happened on the sysenter path.
+ */
+ ENTRY(nmi)
+ cmpl $sysenter_entry,(%esp)
+ je nmi_stack_fixup
+ pushl %eax
+ movl %esp,%eax
+ /* Do not access memory above the end of our stack page,
+ * it might not exist.
+ */
+ andl $(THREAD_SIZE-1),%eax
+ cmpl $(THREAD_SIZE-20),%eax
+ popl %eax
+ jae nmi_stack_correct
+ cmpl $sysenter_entry,12(%esp)
+ je nmi_debug_stack_check
+ nmi_stack_correct:
+ pushl %eax
+ SAVE_ALL
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_nmi
+ RESTORE_ALL
+
+ nmi_stack_fixup:
+ FIX_STACK(12,nmi_stack_correct, 1)
+ jmp nmi_stack_correct
+ nmi_debug_stack_check:
+ cmpw $__KERNEL_CS,16(%esp)
+ jne nmi_stack_correct
+ cmpl $debug - 1,(%esp)
+ jle nmi_stack_correct
+ cmpl $debug_esp_fix_insn,(%esp)
+ jle nmi_debug_stack_fixup
+ nmi_debug_stack_fixup:
+ FIX_STACK(24,nmi_stack_correct, 1)
+ jmp nmi_stack_correct
+ #endif /* XEN */
+
+ ENTRY(int3)
+ pushl $-1 # mark this as an int
+ SAVE_ALL
+ xorl %edx,%edx # zero error code
+ movl %esp,%eax # pt_regs pointer
+ call do_int3
+ testl %eax,%eax
+ jnz restore_all
+ jmp ret_from_exception
+
+ ENTRY(overflow)
+ pushl $0
+ pushl $do_overflow
+ jmp error_code
+
+ ENTRY(bounds)
+ pushl $0
+ pushl $do_bounds
+ jmp error_code
+
+ ENTRY(invalid_op)
+ pushl $0
+ pushl $do_invalid_op
+ jmp error_code
+
+ ENTRY(coprocessor_segment_overrun)
+ pushl $0
+ pushl $do_coprocessor_segment_overrun
+ jmp error_code
+
+ ENTRY(invalid_TSS)
+ pushl $do_invalid_TSS
+ jmp error_code
+
+ ENTRY(segment_not_present)
+ pushl $do_segment_not_present
+ jmp error_code
+
+ ENTRY(stack_segment)
+ pushl $do_stack_segment
+ jmp error_code
+
+ ENTRY(general_protection)
+ pushl $do_general_protection
+ jmp error_code
+
+ ENTRY(alignment_check)
+ pushl $do_alignment_check
+ jmp error_code
+
+ # This handler is special, because it gets an extra value on its stack,
+ # which is the linear faulting address.
+ # fastcall register usage: %eax = pt_regs, %edx = error code,
+ # %ecx = fault address
+ ENTRY(page_fault)
+ pushl %ds
+ pushl %eax
+ xorl %eax, %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %edx
+ decl %eax /* eax = -1 */
+ pushl %ecx
+ pushl %ebx
+ cld
+ movl %es,%edi
+ movl ES(%esp), %ecx /* get the faulting address */
+ movl ORIG_EAX(%esp), %edx /* get the error code */
+ movl %eax, ORIG_EAX(%esp)
+ movl %edi, ES(%esp)
+ movl $(__KERNEL_DS),%eax
+ movl %eax, %ds
+ movl %eax, %es
+ movl %esp,%eax /* pt_regs pointer */
+ XEN_GET_VCPU_INFO(%esi)
+ XEN_SAVE_UPCALL_MASK(%esi,%bl,EVENT_MASK)
+ call do_page_fault
+ jmp ret_from_exception
+
+ #ifdef CONFIG_X86_MCE
+ ENTRY(machine_check)
+ pushl $0
+ pushl machine_check_vector
+ jmp error_code
+ #endif
+
+ ENTRY(fixup_4gb_segment)
+ pushl $do_fixup_4gb_segment
+ jmp error_code
+
+ .data
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_lchown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 - old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_olduname
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long old_select
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ioperm
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_uname
+ .long sys_iopl /* 110 */
+ .long sys_vhangup
+ .long sys_ni_syscall /* old "idle" system call */
+ .long sys_vm86old
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_modify_ldt
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130: old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* reserved for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_vm86
+ .long sys_ni_syscall /* Old sys_query_module */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_chown16
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* reserved for streams1 */
+ .long sys_ni_syscall /* reserved for streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_lchown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_chown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_getdents64 /* 220 */
+ .long sys_fcntl64
+ .long sys_ni_syscall /* reserved for TUX */
+ .long sys_ni_syscall
+ .long sys_gettid
+ .long sys_readahead /* 225 */
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr
+ .long sys_getxattr
+ .long sys_lgetxattr /* 230 */
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr
+ .long sys_flistxattr
+ .long sys_removexattr /* 235 */
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_tkill
+ .long sys_sendfile64
+ .long sys_futex /* 240 */
+ .long sys_sched_setaffinity
+ .long sys_sched_getaffinity
+ .long sys_set_thread_area
+ .long sys_get_thread_area
+ .long sys_io_setup /* 245 */
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel
+ .long sys_fadvise64 /* 250 */
+ .long sys_ni_syscall
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 255 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 260 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 265 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 270 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_ni_syscall /* sys_vserver */
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive /* 280 */
+ .long sys_mq_notify
+ .long sys_mq_getsetattr
+ .long sys_ni_syscall /* reserved for kexec */
+ .long sys_waitid
+ .long sys_ni_syscall /* 285 */ /* available */
+ .long sys_add_key
+ .long sys_request_key
+ .long sys_keyctl
+
+ syscall_table_size=(.-sys_call_table)
--- /dev/null
- .ascii "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=2.0,VIRT_BASE=0xC0000000"
+
+ #include <linux/config.h>
+
+ .section __xen_guest
- /* Set up the stack pointer */
- lss stack_start,%esp
-
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6,XEN_VER=3.0,VIRT_BASE=0xC0000000"
+ .ascii ",LOADER=generic"
+ .ascii ",PT_MODE_WRITABLE"
+ .byte 0
+
+ .text
+ #include <linux/threads.h>
+ #include <linux/linkage.h>
+ #include <asm/segment.h>
+ #include <asm/thread_info.h>
+ #include <asm/asm_offsets.h>
+ #include <asm-xen/xen-public/arch-x86_32.h>
+
+ /*
+ * References to members of the new_cpu_data structure.
+ */
+
+ #define X86 new_cpu_data+CPUINFO_x86
+ #define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
+ #define X86_MODEL new_cpu_data+CPUINFO_x86_model
+ #define X86_MASK new_cpu_data+CPUINFO_x86_mask
+ #define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
+ #define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
+ #define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
+ #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
+
+ ENTRY(startup_32)
+ cld
+
+ /* Copy the necessary stuff from xen_start_info structure. */
+ mov $xen_start_info_union,%edi
+ mov $128,%ecx
+ rep movsl
+
++#ifdef CONFIG_SMP
++ENTRY(startup_32_smp)
++ cld
++#endif /* CONFIG_SMP */
++
++ /* Set up the stack pointer */
++ lss stack_start,%esp
++
+ checkCPUtype:
+
+ /* get vendor info */
+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
+ cpuid
+ movl %eax,X86_CPUID # save CPUID level
+ movl %ebx,X86_VENDOR_ID # lo 4 chars
+ movl %edx,X86_VENDOR_ID+4 # next 4 chars
+ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
+
+ movl $1,%eax # Use the CPUID instruction to get CPU type
+ cpuid
+ movb %al,%cl # save reg for future use
+ andb $0x0f,%ah # mask processor family
+ movb %ah,X86
+ andb $0xf0,%al # mask model
+ shrb $4,%al
+ movb %al,X86_MODEL
+ andb $0x0f,%cl # mask mask revision
+ movb %cl,X86_MASK
+ movl %edx,X86_CAPABILITY
+
++ incb ready
++
+ xorl %eax,%eax # Clear FS/GS and LDT
+ movl %eax,%fs
+ movl %eax,%gs
+ cld # gcc2 wants the direction flag cleared at all times
+
++#ifdef CONFIG_SMP
++ movb ready, %cl
++ cmpb $1,%cl
++ je 1f # the first CPU calls start_kernel
++ # all other CPUs call initialize_secondary
++ call initialize_secondary
++ jmp L6
++1:
++#endif /* CONFIG_SMP */
+ call start_kernel
+ L6:
+ jmp L6 # main should never return here, but
+ # just in case, we know what happens.
+
+ ENTRY(lgdt_finish)
+ movl $(__KERNEL_DS),%eax # reload all the segment registers
+ movw %ax,%ss # after changing gdt.
+
+ movl $(__USER_DS),%eax # DS/ES contains default USER segment
+ movw %ax,%ds
+ movw %ax,%es
+
+ popl %eax # reload CS by intersegment return
+ pushl $(__KERNEL_CS)
+ pushl %eax
+ lret
+
+ ENTRY(stack_start)
+ .long init_thread_union+THREAD_SIZE
+ .long __BOOT_DS
+
++ready: .byte 0
++
+ .globl idt_descr
+ .globl cpu_gdt_descr
+
+ ALIGN
+ .word 0 # 32-bit align idt_desc.address
+ idt_descr:
+ .word IDT_ENTRIES*8-1 # idt contains 256 entries
+ .long idt_table
+
+ # boot GDT descriptor (later on used by CPU#0):
+ .word 0 # 32 bit align gdt_desc.address
+ cpu_gdt_descr:
+ .word GDT_SIZE
+ .long cpu_gdt_table
+
+ .fill NR_CPUS-1,8,0 # space for the other GDT descriptors
+
+ .org 0x1000
+ ENTRY(empty_zero_page)
+
+ .org 0x2000
+ ENTRY(swapper_pg_dir)
+
+ .org 0x3000
+ ENTRY(cpu_gdt_table)
+ .quad 0x0000000000000000 /* NULL descriptor */
+ .quad 0x0000000000000000 /* 0x0b reserved */
+ .quad 0x0000000000000000 /* 0x13 reserved */
+ .quad 0x0000000000000000 /* 0x1b reserved */
+ .quad 0x0000000000000000 /* 0x20 unused */
+ .quad 0x0000000000000000 /* 0x28 unused */
+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
+ .quad 0x0000000000000000 /* 0x4b reserved */
+ .quad 0x0000000000000000 /* 0x53 reserved */
+ .quad 0x0000000000000000 /* 0x5b reserved */
+
+ .quad 0x00cfbb000000c3ff /* 0x60 kernel 4GB code at 0x00000000 */
+ .quad 0x00cfb3000000c3ff /* 0x68 kernel 4GB data at 0x00000000 */
+ .quad 0x00cffb000000c3ff /* 0x73 user 4GB code at 0x00000000 */
+ .quad 0x00cff3000000c3ff /* 0x7b user 4GB data at 0x00000000 */
+
+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
+
+ /* Segments used for calling PnP BIOS */
+ .quad 0x0000000000000000 /* 0x90 32-bit code */
+ .quad 0x0000000000000000 /* 0x98 16-bit code */
+ .quad 0x0000000000000000 /* 0xa0 16-bit data */
+ .quad 0x0000000000000000 /* 0xa8 16-bit data */
+ .quad 0x0000000000000000 /* 0xb0 16-bit data */
+ /*
+ * The APM segments have byte granularity and their bases
+ * and limits are set at run time.
+ */
+ .quad 0x0000000000000000 /* 0xb8 APM CS code */
+ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
+ .quad 0x0000000000000000 /* 0xc8 APM DS data */
+
+ .quad 0x0000000000000000 /* 0xd0 - unused */
+ .quad 0x0000000000000000 /* 0xd8 - unused */
+ .quad 0x0000000000000000 /* 0xe0 - unused */
+ .quad 0x0000000000000000 /* 0xe8 - unused */
+ .quad 0x0000000000000000 /* 0xf0 - unused */
+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
+ .fill GDT_ENTRIES-32,8,0
+
+ .org 0x4000
+ ENTRY(default_ldt)
+
+ .org 0x5000
+ /*
+ * Real beginning of normal "text" segment
+ */
+ ENTRY(stext)
+ ENTRY(_stext)
--- /dev/null
- int cpu = smp_processor_id();
+ /*
+ * linux/arch/i386/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+ /*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+ #include <stdarg.h>
+
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+ #include <linux/fs.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/elfcore.h>
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
+ #include <linux/stddef.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/user.h>
+ #include <linux/a.out.h>
+ #include <linux/interrupt.h>
+ #include <linux/config.h>
+ #include <linux/utsname.h>
+ #include <linux/delay.h>
+ #include <linux/reboot.h>
+ #include <linux/init.h>
+ #include <linux/mc146818rtc.h>
+ #include <linux/module.h>
+ #include <linux/kallsyms.h>
+ #include <linux/ptrace.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+ #include <asm/io.h>
+ #include <asm/ldt.h>
+ #include <asm/processor.h>
+ #include <asm/i387.h>
+ #include <asm/irq.h>
+ #include <asm/desc.h>
+ #include <asm-xen/multicall.h>
+ #include <asm-xen/xen-public/dom0_ops.h>
+ #ifdef CONFIG_MATH_EMULATION
+ #include <asm/math_emu.h>
+ #endif
+
+ #include <linux/irq.h>
+ #include <linux/err.h>
+
+ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+
+ int hlt_counter;
+
+ unsigned long boot_option_idle_override = 0;
+ EXPORT_SYMBOL(boot_option_idle_override);
+
+ /*
+ * Return saved PC of a blocked thread.
+ */
+ unsigned long thread_saved_pc(struct task_struct *tsk)
+ {
+ return ((unsigned long *)tsk->thread.esp)[3];
+ }
+
+ /*
+ * Powermanagement idle function, if any..
+ */
+ void (*pm_idle)(void);
+ static cpumask_t cpu_idle_map;
+
+ void disable_hlt(void)
+ {
+ hlt_counter++;
+ }
+
+ EXPORT_SYMBOL(disable_hlt);
+
+ void enable_hlt(void)
+ {
+ hlt_counter--;
+ }
+
+ EXPORT_SYMBOL(enable_hlt);
+
+ /* XXX XEN doesn't use default_idle(), poll_idle(). Use xen_idle() instead. */
+ extern int set_timeout_timer(void);
+ void xen_idle(void)
+ {
- unsigned long eflags;
++ int cpu;
+
+ local_irq_disable();
+
++ cpu = smp_processor_id();
+ if (rcu_pending(cpu))
+ rcu_check_callbacks(cpu, 0);
+
+ if (need_resched()) {
+ local_irq_enable();
+ } else if (set_timeout_timer() == 0) {
+ /* NB. Blocking reenable events in a race-free manner. */
+ HYPERVISOR_block();
+ } else {
+ local_irq_enable();
+ HYPERVISOR_yield();
+ }
+ }
+
+ /*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+ void cpu_idle (void)
+ {
+ int cpu = _smp_processor_id();
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched()) {
+
+ if (cpu_isset(cpu, cpu_idle_map))
+ cpu_clear(cpu, cpu_idle_map);
+ rmb();
+
+ irq_stat[cpu].idle_timestamp = jiffies;
+ xen_idle();
+ }
+ schedule();
+ }
+ }
+
+ void cpu_idle_wait(void)
+ {
+ int cpu;
+ cpumask_t map;
+
+ for_each_online_cpu(cpu)
+ cpu_set(cpu, cpu_idle_map);
+
+ wmb();
+ do {
+ ssleep(1);
+ cpus_and(map, cpu_idle_map, cpu_online_map);
+ } while (!cpus_empty(map));
+ }
+ EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+ /* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
+ /* Always use xen_idle() instead. */
+ void __init select_idle_routine(const struct cpuinfo_x86 *c) {}
+
+ void show_regs(struct pt_regs * regs)
+ {
+ printk("\n");
+ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
+ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
+ print_symbol("EIP is at %s\n", regs->eip);
+
+ if (regs->xcs & 2)
+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
+ printk(" EFLAGS: %08lx %s (%s)\n",
+ regs->eflags, print_tainted(),UTS_RELEASE);
+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+ regs->eax,regs->ebx,regs->ecx,regs->edx);
+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
+ regs->esi, regs->edi, regs->ebp);
+ printk(" DS: %04x ES: %04x\n",
+ 0xffff & regs->xds,0xffff & regs->xes);
+
+ show_trace(NULL, &regs->esp);
+ }
+
+ /*
+ * This gets run with %ebx containing the
+ * function to call, and %edx containing
+ * the "args".
+ */
+ extern void kernel_thread_helper(void);
+ __asm__(".section .text\n"
+ ".align 4\n"
+ "kernel_thread_helper:\n\t"
+ "movl %edx,%eax\n\t"
+ "pushl %edx\n\t"
+ "call *%ebx\n\t"
+ "pushl %eax\n\t"
+ "call do_exit\n"
+ ".previous");
+
+ /*
+ * Create a kernel thread
+ */
+ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+ {
+ struct pt_regs regs;
+
+ memset(&regs, 0, sizeof(regs));
+
+ regs.ebx = (unsigned long) fn;
+ regs.edx = (unsigned long) arg;
+
+ regs.xds = __USER_DS;
+ regs.xes = __USER_DS;
+ regs.orig_eax = -1;
+ regs.eip = (unsigned long) kernel_thread_helper;
+ regs.xcs = __KERNEL_CS;
+ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
+
+ /* Ok, create the new process.. */
+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+ }
+
+ /*
+ * Free current thread data structures etc..
+ */
+ void exit_thread(void)
+ {
+ struct task_struct *tsk = current;
+ struct thread_struct *t = &tsk->thread;
+
+ /* The process may have allocated an io port bitmap... nuke it. */
+ if (unlikely(NULL != t->io_bitmap_ptr)) {
+ int cpu = get_cpu();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+ kfree(t->io_bitmap_ptr);
+ t->io_bitmap_ptr = NULL;
+ /*
+ * Careful, clear this in the TSS too:
+ */
+ memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
+ t->io_bitmap_max = 0;
+ tss->io_bitmap_owner = NULL;
+ tss->io_bitmap_max = 0;
+ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+ put_cpu();
+ }
+ }
+
+ void flush_thread(void)
+ {
+ struct task_struct *tsk = current;
+
+ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+ /*
+ * Forget coprocessor state..
+ */
+ clear_fpu(tsk);
+ clear_used_math();
+ }
+
+ void release_thread(struct task_struct *dead_task)
+ {
+ if (dead_task->mm) {
+ // temporary debugging check
+ if (dead_task->mm->context.size) {
+ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+ dead_task->comm,
+ dead_task->mm->context.ldt,
+ dead_task->mm->context.size);
+ BUG();
+ }
+ }
+
+ release_vm86_irqs(dead_task);
+ }
+
+ /*
+ * This gets called before we allocate a new thread and copy
+ * the current task into it.
+ */
+ void prepare_to_copy(struct task_struct *tsk)
+ {
+ unlazy_fpu(tsk);
+ }
+
+ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+ {
+ struct pt_regs * childregs;
+ struct task_struct *tsk;
+ int err;
-
- __asm__ __volatile__ ( "pushfl; popl %0" : "=r" (eflags) : );
- p->thread.io_pl = (eflags >> 12) & 3;
+
+ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+ *childregs = *regs;
+ childregs->eax = 0;
+ childregs->esp = esp;
+
+ p->thread.esp = (unsigned long) childregs;
+ p->thread.esp0 = (unsigned long) (childregs+1);
+
+ p->thread.eip = (unsigned long) ret_from_fork;
+
+ savesegment(fs,p->thread.fs);
+ savesegment(gs,p->thread.gs);
+
+ tsk = current;
+ if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
+ if (!p->thread.io_bitmap_ptr) {
+ p->thread.io_bitmap_max = 0;
+ return -ENOMEM;
+ }
+ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
+ IO_BITMAP_BYTES);
+ }
+
+ /*
+ * Set a new TLS for the child thread?
+ */
+ if (clone_flags & CLONE_SETTLS) {
+ struct desc_struct *desc;
+ struct user_desc info;
+ int idx;
+
+ err = -EFAULT;
+ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
+ goto out;
+ err = -EINVAL;
+ if (LDT_empty(&info))
+ goto out;
+
+ idx = info.entry_number;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ goto out;
+
+ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
+
- queue_multicall0(__HYPERVISOR_fpu_taskswitch);
++ p->thread.io_pl = current->thread.io_pl;
+
+ err = 0;
+ out:
+ if (err && p->thread.io_bitmap_ptr) {
+ kfree(p->thread.io_bitmap_ptr);
+ p->thread.io_bitmap_max = 0;
+ }
+ return err;
+ }
+
+ /*
+ * fill in the user structure for a core dump..
+ */
+ void dump_thread(struct pt_regs * regs, struct user * dump)
+ {
+ int i;
+
+ /* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+ for (i = 0; i < 8; i++)
+ dump->u_debugreg[i] = current->thread.debugreg[i];
+
+ if (dump->start_stack < TASK_SIZE)
+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+ dump->regs.ebx = regs->ebx;
+ dump->regs.ecx = regs->ecx;
+ dump->regs.edx = regs->edx;
+ dump->regs.esi = regs->esi;
+ dump->regs.edi = regs->edi;
+ dump->regs.ebp = regs->ebp;
+ dump->regs.eax = regs->eax;
+ dump->regs.ds = regs->xds;
+ dump->regs.es = regs->xes;
+ savesegment(fs,dump->regs.fs);
+ savesegment(gs,dump->regs.gs);
+ dump->regs.orig_eax = regs->orig_eax;
+ dump->regs.eip = regs->eip;
+ dump->regs.cs = regs->xcs;
+ dump->regs.eflags = regs->eflags;
+ dump->regs.esp = regs->esp;
+ dump->regs.ss = regs->xss;
+
+ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+ }
+
+ /*
+ * Capture the user space registers if the task is not running (in user space)
+ */
+ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+ {
+ struct pt_regs ptregs;
+
+ ptregs = *(struct pt_regs *)
+ ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
+ ptregs.xcs &= 0xffff;
+ ptregs.xds &= 0xffff;
+ ptregs.xes &= 0xffff;
+ ptregs.xss &= 0xffff;
+
+ elf_core_copy_regs(regs, &ptregs);
+
+ boot_option_idle_override = 1;
+ return 1;
+ }
+
+ static inline void
+ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
+ {
+ if (!next->io_bitmap_ptr) {
+ /*
+ * Disable the bitmap via an invalid offset. We still cache
+ * the previous bitmap owner and the IO bitmap contents:
+ */
+ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+ return;
+ }
+ if (likely(next == tss->io_bitmap_owner)) {
+ /*
+ * Previous owner of the bitmap (hence the bitmap content)
+ * matches the next task, we dont have to do anything but
+ * to set a valid offset in the TSS:
+ */
+ tss->io_bitmap_base = IO_BITMAP_OFFSET;
+ return;
+ }
+ /*
+ * Lazy TSS's I/O bitmap copy. We set an invalid offset here
+ * and we let the task get a GPF in case an I/O instruction
+ * is performed. The handler of the GPF will verify that the
+ * faulting task has a valid I/O bitmap and, if true, does the
+ * real copy and restarts the instruction. This will save us
+ * redundant copies when the currently switched task does not
+ * perform any I/O during its timeslice.
+ */
+ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
+ }
+ /*
+ * This special macro can be used to load a debugging register
+ */
+ #define loaddebug(thread,register) \
+ HYPERVISOR_set_debugreg((register), \
+ (thread->debugreg[register]))
+
+ /*
+ * switch_to(x,y) should switch tasks from x to y.
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process. Lazy FP saving no longer makes any sense
+ * with modern CPU's, and this simplifies a lot of things (SMP
+ * and UP become the same).
+ *
+ * NOTE! We used to use the x86 hardware context switching. The
+ * reason for not using it any more becomes apparent when you
+ * try to recover gracefully from saved state that is no longer
+ * valid (stale segment register values in particular). With the
+ * hardware task-switch, there is no way to fix up bad state in
+ * a reasonable manner.
+ *
+ * The fact that Intel documents the hardware task-switching to
+ * be slow is a fairly red herring - this code is not noticeably
+ * faster. However, there _is_ some room for improvement here,
+ * so the performance issues may eventually be a valid point.
+ * More important, however, is the fact that this allows us much
+ * more flexibility.
+ *
+ * The return value (in %eax) will be the "prev" task after
+ * the task-switch, and shows up in ret_from_fork in entry.S,
+ * for example.
+ */
+ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ {
+ struct thread_struct *prev = &prev_p->thread,
+ *next = &next_p->thread;
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+ dom0_op_t op;
+
+ /* NB. No need to disable interrupts as already done in sched.c */
+ /* __cli(); */
+
+ /*
+ * Save away %fs and %gs. No need to save %es and %ds, as
+ * those are always kernel segments while inside the kernel.
+ */
+ asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
+ asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+
+ /*
+ * We clobber FS and GS here so that we avoid a GPF when restoring
+ * previous task's FS/GS values in Xen when the LDT is switched.
+ */
+ __asm__ __volatile__ (
+ "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : :
+ "eax" );
+
+ MULTICALL_flush_page_update_queue();
+
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+ /*
+ * This is basically '__unlazy_fpu', except that we queue a
+ * multicall to indicate FPU task switch, rather than
+ * synchronously trapping to Xen.
+ */
+ if (prev_p->thread_info->status & TS_USEDFPU) {
+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ queue_multicall1(__HYPERVISOR_fpu_taskswitch, 1);
+ }
+
+ /*
+ * Reload esp0, LDT and the page table pointer:
+ * This is load_esp0(tss, next) with a multicall.
+ */
+ tss->esp0 = next->esp0;
+ queue_multicall2(__HYPERVISOR_stack_switch, tss->ss0, tss->esp0);
+
+ /*
+ * Load the per-thread Thread-Local Storage descriptor.
+ * This is load_TLS(next, cpu) with multicalls.
+ */
+ #define C(i) do { \
+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
+ next->tls_array[i].b != prev->tls_array[i].b)) \
+ queue_multicall3(__HYPERVISOR_update_descriptor, \
+ virt_to_machine(&get_cpu_gdt_table(cpu) \
+ [GDT_ENTRY_TLS_MIN + i]), \
+ ((u32 *)&next->tls_array[i])[0], \
+ ((u32 *)&next->tls_array[i])[1]); \
+ } while (0)
+ C(0); C(1); C(2);
+ #undef C
+
+ if (xen_start_info.flags & SIF_PRIVILEGED) {
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = next->io_pl;
+ op.interface_version = DOM0_INTERFACE_VERSION;
+ queue_multicall1(__HYPERVISOR_dom0_op, (unsigned long)&op);
+ }
+
+ /* EXECUTE ALL TASK SWITCH XEN SYSCALLS AT THIS POINT. */
+ execute_multicall_list();
+ /* __sti(); */
+
+ /*
+ * Restore %fs and %gs if needed.
+ */
+ if (unlikely(next->fs | next->gs)) {
+ loadsegment(fs, next->fs);
+ loadsegment(gs, next->gs);
+ }
+
+ /*
+ * Now maybe reload the debug registers
+ */
+ if (unlikely(next->debugreg[7])) {
+ loaddebug(next, 0);
+ loaddebug(next, 1);
+ loaddebug(next, 2);
+ loaddebug(next, 3);
+ /* no 4 and 5 */
+ loaddebug(next, 6);
+ loaddebug(next, 7);
+ }
+
+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
+ handle_io_bitmap(next, tss);
+
+ return prev_p;
+ }
+
+ asmlinkage int sys_fork(struct pt_regs regs)
+ {
+ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+ }
+
+ asmlinkage int sys_clone(struct pt_regs regs)
+ {
+ unsigned long clone_flags;
+ unsigned long newsp;
+ int __user *parent_tidptr, *child_tidptr;
+
+ clone_flags = regs.ebx;
+ newsp = regs.ecx;
+ parent_tidptr = (int __user *)regs.edx;
+ child_tidptr = (int __user *)regs.edi;
+ if (!newsp)
+ newsp = regs.esp;
+ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
+ }
+
+ /*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+ asmlinkage int sys_vfork(struct pt_regs regs)
+ {
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
+ }
+
+ /*
+ * sys_execve() executes a new program.
+ */
+ asmlinkage int sys_execve(struct pt_regs regs)
+ {
+ int error;
+ char * filename;
+
+ filename = getname((char __user *) regs.ebx);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename,
+ (char __user * __user *) regs.ecx,
+ (char __user * __user *) regs.edx,
+ &regs);
+ if (error == 0) {
+ task_lock(current);
+ current->ptrace &= ~PT_DTRACE;
+ task_unlock(current);
+ /* Make sure we don't return using sysenter.. */
+ set_thread_flag(TIF_IRET);
+ }
+ putname(filename);
+ out:
+ return error;
+ }
+
+ #define top_esp (THREAD_SIZE - sizeof(unsigned long))
+ #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
+
+ unsigned long get_wchan(struct task_struct *p)
+ {
+ unsigned long ebp, esp, eip;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ stack_page = (unsigned long)p->thread_info;
+ esp = p->thread.esp;
+ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
+ return 0;
+ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
+ ebp = *(unsigned long *) esp;
+ do {
+ if (ebp < stack_page || ebp > top_ebp+stack_page)
+ return 0;
+ eip = *(unsigned long *) (ebp+4);
+ if (!in_sched_functions(eip))
+ return eip;
+ ebp = *(unsigned long *) ebp;
+ } while (count++ < 16);
+ return 0;
+ }
+
+ /*
+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
+ */
+ static int get_free_idx(void)
+ {
+ struct thread_struct *t = &current->thread;
+ int idx;
+
+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
+ if (desc_empty(t->tls_array + idx))
+ return idx + GDT_ENTRY_TLS_MIN;
+ return -ESRCH;
+ }
+
+ /*
+ * Set a given TLS descriptor:
+ */
+ asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
+ {
+ struct thread_struct *t = &current->thread;
+ struct user_desc info;
+ struct desc_struct *desc;
+ int cpu, idx;
+
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+ idx = info.entry_number;
+
+ /*
+ * index -1 means the kernel should try to find and
+ * allocate an empty descriptor:
+ */
+ if (idx == -1) {
+ idx = get_free_idx();
+ if (idx < 0)
+ return idx;
+ if (put_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ }
+
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+ /*
+ * We must not get preempted while modifying the TLS.
+ */
+ cpu = get_cpu();
+
+ if (LDT_empty(&info)) {
+ desc->a = 0;
+ desc->b = 0;
+ } else {
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
+ load_TLS(t, cpu);
+
+ put_cpu();
+
+ return 0;
+ }
+
+ /*
+ * Get the current Thread-Local Storage area:
+ */
+
+ #define GET_BASE(desc) ( \
+ (((desc)->a >> 16) & 0x0000ffff) | \
+ (((desc)->b << 16) & 0x00ff0000) | \
+ ( (desc)->b & 0xff000000) )
+
+ #define GET_LIMIT(desc) ( \
+ ((desc)->a & 0x0ffff) | \
+ ((desc)->b & 0xf0000) )
+
+ #define GET_32BIT(desc) (((desc)->b >> 22) & 1)
+ #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
+ #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
+ #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
+ #define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
+ #define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
+
+ asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
+ {
+ struct user_desc info;
+ struct desc_struct *desc;
+ int idx;
+
+ if (get_user(idx, &u_info->entry_number))
+ return -EFAULT;
+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
+ return -EINVAL;
+
+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
+
+ info.entry_number = idx;
+ info.base_addr = GET_BASE(desc);
+ info.limit = GET_LIMIT(desc);
+ info.seg_32bit = GET_32BIT(desc);
+ info.contents = GET_CONTENTS(desc);
+ info.read_exec_only = !GET_WRITABLE(desc);
+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
+ info.seg_not_present = !GET_PRESENT(desc);
+ info.useable = GET_USEABLE(desc);
+
+ if (copy_to_user(u_info, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+ }
+
--- /dev/null
-unsigned long *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+ /*
+ * linux/arch/i386/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *
+ * Memory region support
+ * David Parsons <orc@pell.chi.il.us>, July-August 1999
+ *
+ * Added E820 sanitization routine (removes overlapping memory regions);
+ * Brian Moyle <bmoyle@mvista.com>, February 2001
+ *
+ * Moved CPU detection code to cpu/${cpu}.c
+ * Patrick Mochel <mochel@osdl.org>, March 2002
+ *
+ * Provisions for empty E820 memory regions (reported by certain BIOSes).
+ * Alex Achenbach <xela@slit.de>, December 2002.
+ *
+ */
+
+ /*
+ * This file handles the architecture-dependent parts of initialization
+ */
+
+ #include <linux/sched.h>
+ #include <linux/mm.h>
+ #include <linux/tty.h>
+ #include <linux/ioport.h>
+ #include <linux/acpi.h>
+ #include <linux/apm_bios.h>
+ #include <linux/initrd.h>
+ #include <linux/bootmem.h>
+ #include <linux/seq_file.h>
+ #include <linux/console.h>
+ #include <linux/mca.h>
+ #include <linux/root_dev.h>
+ #include <linux/highmem.h>
+ #include <linux/module.h>
+ #include <linux/efi.h>
+ #include <linux/init.h>
+ #include <linux/edd.h>
++#include <linux/percpu.h>
+ #include <video/edid.h>
+ #include <asm/e820.h>
+ #include <asm/mpspec.h>
+ #include <asm/setup.h>
+ #include <asm/arch_hooks.h>
+ #include <asm/sections.h>
+ #include <asm/io_apic.h>
+ #include <asm/ist.h>
+ #include <asm/io.h>
+ #include <asm-xen/hypervisor.h>
+ #include "setup_arch_pre.h"
+ #include <bios_ebda.h>
+
+ /* Allows setting of maximum possible memory size */
+ static unsigned long xen_override_max_pfn;
+
+ int disable_pse __initdata = 0;
+
+ /*
+ * Machine setup..
+ */
+
+ #ifdef CONFIG_EFI
+ int efi_enabled = 0;
+ EXPORT_SYMBOL(efi_enabled);
+ #endif
+
+ /* cpu data as detected by the assembly code in head.S */
+ struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+ /* common cpu data for all cpus */
+ struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 0, 1, 0, -1 };
+
+ unsigned long mmu_cr4_features;
+ EXPORT_SYMBOL_GPL(mmu_cr4_features);
+
+ #ifdef CONFIG_ACPI_INTERPRETER
+ int acpi_disabled = 0;
+ #else
+ int acpi_disabled = 1;
+ #endif
+ EXPORT_SYMBOL(acpi_disabled);
+
+ #ifdef CONFIG_ACPI_BOOT
+ int __initdata acpi_force = 0;
+ extern acpi_interrupt_flags acpi_sci_flags;
+ #endif
+
+ /* for MCA, but anyone else can use it if they want */
+ unsigned int machine_id;
+ unsigned int machine_submodel_id;
+ unsigned int BIOS_revision;
+ unsigned int mca_pentium_flag;
+
+ /* For PCI or other memory-mapped resources */
+ unsigned long pci_mem_start = 0x10000000;
+
+ /* Boot loader ID as an integer, for the benefit of proc_dointvec */
+ int bootloader_type;
+
+ /* user-defined highmem size */
+ static unsigned int highmem_pages = -1;
+
+ /*
+ * Setup options
+ */
+ struct drive_info_struct { char dummy[32]; } drive_info;
+ struct screen_info screen_info;
+ struct apm_info apm_info;
+ struct sys_desc_table_struct {
+ unsigned short length;
+ unsigned char table[0];
+ };
+ struct edid_info edid_info;
+ struct ist_info ist_info;
+ struct e820map e820;
+
+ unsigned char aux_device_present;
+
+ extern void early_cpu_init(void);
+ extern void dmi_scan_machine(void);
+ extern void generic_apic_probe(char *);
+ extern int root_mountflags;
+
+ unsigned long saved_videomode;
+
+ #define RAMDISK_IMAGE_START_MASK 0x07FF
+ #define RAMDISK_PROMPT_FLAG 0x8000
+ #define RAMDISK_LOAD_FLAG 0x4000
+
+ static char command_line[COMMAND_LINE_SIZE];
+
+ unsigned char __initdata boot_params[PARAM_SIZE];
+
+ static struct resource data_resource = {
+ .name = "Kernel data",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ };
+
+ static struct resource code_resource = {
+ .name = "Kernel code",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ };
+
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ static struct resource system_rom_resource = {
+ .name = "System ROM",
+ .start = 0xf0000,
+ .end = 0xfffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ };
+
+ static struct resource extension_rom_resource = {
+ .name = "Extension ROM",
+ .start = 0xe0000,
+ .end = 0xeffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ };
+
+ static struct resource adapter_rom_resources[] = { {
+ .name = "Adapter ROM",
+ .start = 0xc8000,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ }, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ }, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ }, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ }, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ }, {
+ .name = "Adapter ROM",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ } };
+
+ #define ADAPTER_ROM_RESOURCES \
+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
+
+ static struct resource video_rom_resource = {
+ .name = "Video ROM",
+ .start = 0xc0000,
+ .end = 0xc7fff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
+ };
+ #endif
+
+ static struct resource video_ram_resource = {
+ .name = "Video RAM area",
+ .start = 0xa0000,
+ .end = 0xbffff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+ };
+
+ static struct resource standard_io_resources[] = { {
+ .name = "dma1",
+ .start = 0x0000,
+ .end = 0x001f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "pic1",
+ .start = 0x0020,
+ .end = 0x0021,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "timer0",
+ .start = 0x0040,
+ .end = 0x0043,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "timer1",
+ .start = 0x0050,
+ .end = 0x0053,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "keyboard",
+ .start = 0x0060,
+ .end = 0x006f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "dma page reg",
+ .start = 0x0080,
+ .end = 0x008f,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "pic2",
+ .start = 0x00a0,
+ .end = 0x00a1,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "dma2",
+ .start = 0x00c0,
+ .end = 0x00df,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ }, {
+ .name = "fpu",
+ .start = 0x00f0,
+ .end = 0x00ff,
+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
+ } };
+
+ #define STANDARD_IO_RESOURCES \
+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
+
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ #define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
+
+ static int __init romchecksum(unsigned char *rom, unsigned long length)
+ {
+ unsigned char *p, sum = 0;
+
+ for (p = rom; p < rom + length; p++)
+ sum += *p;
+ return sum == 0;
+ }
+
+ static void __init probe_roms(void)
+ {
+ unsigned long start, length, upper;
+ unsigned char *rom;
+ int i;
+
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ video_rom_resource.start = start;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* if checksum okay, trust length byte */
+ if (length && romchecksum(rom, length))
+ video_rom_resource.end = start + length - 1;
+
+ request_resource(&iomem_resource, &video_rom_resource);
+ break;
+ }
+
+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
+ if (start < upper)
+ start = upper;
+
+ /* system rom */
+ request_resource(&iomem_resource, &system_rom_resource);
+ upper = system_rom_resource.start;
+
+ /* check for extension rom (ignore length byte!) */
+ rom = isa_bus_to_virt(extension_rom_resource.start);
+ if (romsignature(rom)) {
+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
+ if (romchecksum(rom, length)) {
+ request_resource(&iomem_resource, &extension_rom_resource);
+ upper = extension_rom_resource.start;
+ }
+ }
+
+ /* check for adapter roms on 2k boundaries */
+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
+ rom = isa_bus_to_virt(start);
+ if (!romsignature(rom))
+ continue;
+
+ /* 0 < length <= 0x7f * 512, historically */
+ length = rom[2] * 512;
+
+ /* but accept any length that fits if checksum okay */
+ if (!length || start + length > upper || !romchecksum(rom, length))
+ continue;
+
+ adapter_rom_resources[i].start = start;
+ adapter_rom_resources[i].end = start + length - 1;
+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
+
+ start = adapter_rom_resources[i++].end & ~2047UL;
+ }
+ }
+ #endif
+
+ /*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+ shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+ EXPORT_SYMBOL(HYPERVISOR_shared_info);
+
-multicall_entry_t multicall_list[8];
-int nr_multicall_ents = 0;
++unsigned int *phys_to_machine_mapping, *pfn_to_mfn_frame_list;
+ EXPORT_SYMBOL(phys_to_machine_mapping);
+
- phys_to_machine_mapping = (unsigned long *)xen_start_info.mfn_list;
++DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
++DEFINE_PER_CPU(int, nr_multicall_ents);
+
+ /* Raw start-of-day parameters from the hypervisor. */
+ union xen_start_info_union xen_start_info_union;
+
+ static void __init limit_regions(unsigned long long size)
+ {
+ unsigned long long current_addr = 0;
+ int i;
+
+ if (efi_enabled) {
+ for (i = 0; i < memmap.nr_map; i++) {
+ current_addr = memmap.map[i].phys_addr +
+ (memmap.map[i].num_pages << 12);
+ if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
+ if (current_addr >= size) {
+ memmap.map[i].num_pages -=
+ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
+ memmap.nr_map = i + 1;
+ return;
+ }
+ }
+ }
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ if (e820.map[i].type == E820_RAM) {
+ current_addr = e820.map[i].addr + e820.map[i].size;
+ if (current_addr >= size) {
+ e820.map[i].size -= current_addr-size;
+ e820.nr_map = i + 1;
+ return;
+ }
+ }
+ }
+ }
+
+ static void __init add_memory_region(unsigned long long start,
+ unsigned long long size, int type)
+ {
+ int x;
+
+ if (!efi_enabled) {
+ x = e820.nr_map;
+
+ if (x == E820MAX) {
+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
+ return;
+ }
+
+ e820.map[x].addr = start;
+ e820.map[x].size = size;
+ e820.map[x].type = type;
+ e820.nr_map++;
+ }
+ } /* add_memory_region */
+
+ #define E820_DEBUG 1
+
+ static void __init print_memory_map(char *who)
+ {
+ int i;
+
+ for (i = 0; i < e820.nr_map; i++) {
+ printk(" %s: %016Lx - %016Lx ", who,
+ e820.map[i].addr,
+ e820.map[i].addr + e820.map[i].size);
+ switch (e820.map[i].type) {
+ case E820_RAM: printk("(usable)\n");
+ break;
+ case E820_RESERVED:
+ printk("(reserved)\n");
+ break;
+ case E820_ACPI:
+ printk("(ACPI data)\n");
+ break;
+ case E820_NVS:
+ printk("(ACPI NVS)\n");
+ break;
+ default: printk("type %lu\n", e820.map[i].type);
+ break;
+ }
+ }
+ }
+
+ #if 0
+ /*
+ * Sanitize the BIOS e820 map.
+ *
+ * Some e820 responses include overlapping entries. The following
+ * replaces the original e820 map with a new one, removing overlaps.
+ *
+ */
+ struct change_member {
+ struct e820entry *pbios; /* pointer to original bios entry */
+ unsigned long long addr; /* address for this change point */
+ };
+ struct change_member change_point_list[2*E820MAX] __initdata;
+ struct change_member *change_point[2*E820MAX] __initdata;
+ struct e820entry *overlap_list[E820MAX] __initdata;
+ struct e820entry new_bios[E820MAX] __initdata;
+
+ static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
+ {
+ struct change_member *change_tmp;
+ unsigned long current_type, last_type;
+ unsigned long long last_addr;
+ int chgidx, still_changing;
+ int overlap_entries;
+ int new_bios_entry;
+ int old_nr, new_nr, chg_nr;
+ int i;
+
+ /*
+ Visually we're performing the following (1,2,3,4 = memory types)...
+
+ Sample memory map (w/overlaps):
+ ____22__________________
+ ______________________4_
+ ____1111________________
+ _44_____________________
+ 11111111________________
+ ____________________33__
+ ___________44___________
+ __________33333_________
+ ______________22________
+ ___________________2222_
+ _________111111111______
+ _____________________11_
+ _________________4______
+
+ Sanitized equivalent (no overlap):
+ 1_______________________
+ _44_____________________
+ ___1____________________
+ ____22__________________
+ ______11________________
+ _________1______________
+ __________3_____________
+ ___________44___________
+ _____________33_________
+ _______________2________
+ ________________1_______
+ _________________4______
+ ___________________2____
+ ____________________33__
+ ______________________4_
+ */
+
+ /* if there's only one memory region, don't bother */
+ if (*pnr_map < 2)
+ return -1;
+
+ old_nr = *pnr_map;
+
+ /* bail out if we find any unreasonable addresses in bios map */
+ for (i=0; i<old_nr; i++)
+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
+ return -1;
+
+ /* create pointers for initial change-point information (for sorting) */
+ for (i=0; i < 2*old_nr; i++)
+ change_point[i] = &change_point_list[i];
+
+ /* record all known change-points (starting and ending addresses),
+ omitting those that are for empty memory regions */
+ chgidx = 0;
+ for (i=0; i < old_nr; i++) {
+ if (biosmap[i].size != 0) {
+ change_point[chgidx]->addr = biosmap[i].addr;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
+ change_point[chgidx++]->pbios = &biosmap[i];
+ }
+ }
+ chg_nr = chgidx; /* true number of change-points */
+
+ /* sort change-point list by memory addresses (low -> high) */
+ still_changing = 1;
+ while (still_changing) {
+ still_changing = 0;
+ for (i=1; i < chg_nr; i++) {
+ /* if <current_addr> > <last_addr>, swap */
+ /* or, if current=<start_addr> & last=<end_addr>, swap */
+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
+ ((change_point[i]->addr == change_point[i-1]->addr) &&
+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
+ )
+ {
+ change_tmp = change_point[i];
+ change_point[i] = change_point[i-1];
+ change_point[i-1] = change_tmp;
+ still_changing=1;
+ }
+ }
+ }
+
+ /* create a new bios memory map, removing overlaps */
+ overlap_entries=0; /* number of entries in the overlap table */
+ new_bios_entry=0; /* index for creating new bios map entries */
+ last_type = 0; /* start with undefined memory type */
+ last_addr = 0; /* start with 0 as last starting address */
+ /* loop through change-points, determining affect on the new bios map */
+ for (chgidx=0; chgidx < chg_nr; chgidx++)
+ {
+ /* keep track of all overlapping bios entries */
+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
+ {
+ /* add map entry to overlap list (> 1 entry implies an overlap) */
+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
+ }
+ else
+ {
+ /* remove entry from list (order independent, so swap with last) */
+ for (i=0; i<overlap_entries; i++)
+ {
+ if (overlap_list[i] == change_point[chgidx]->pbios)
+ overlap_list[i] = overlap_list[overlap_entries-1];
+ }
+ overlap_entries--;
+ }
+ /* if there are overlapping entries, decide which "type" to use */
+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
+ current_type = 0;
+ for (i=0; i<overlap_entries; i++)
+ if (overlap_list[i]->type > current_type)
+ current_type = overlap_list[i]->type;
+ /* continue building up new bios map based on this information */
+ if (current_type != last_type) {
+ if (last_type != 0) {
+ new_bios[new_bios_entry].size =
+ change_point[chgidx]->addr - last_addr;
+ /* move forward only if the new size was non-zero */
+ if (new_bios[new_bios_entry].size != 0)
+ if (++new_bios_entry >= E820MAX)
+ break; /* no more space left for new bios entries */
+ }
+ if (current_type != 0) {
+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
+ new_bios[new_bios_entry].type = current_type;
+ last_addr=change_point[chgidx]->addr;
+ }
+ last_type = current_type;
+ }
+ }
+ new_nr = new_bios_entry; /* retain count for new bios entries */
+
+ /* copy new bios mapping into original location */
+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
+ *pnr_map = new_nr;
+
+ return 0;
+ }
+
+ /*
+ * Copy the BIOS e820 map into a safe place.
+ *
+ * Sanity-check it while we're at it..
+ *
+ * If we're lucky and live on a modern system, the setup code
+ * will have given us a memory map that we can use to properly
+ * set up memory. If we aren't, we'll fake a memory map.
+ *
+ * We check to see that the memory map contains at least 2 elements
+ * before we'll use it, because the detection code in setup.S may
+ * not be perfect and most every PC known to man has two memory
+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
+ * thinkpad 560x, for example, does not cooperate with the memory
+ * detection code.)
+ */
+ static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+ {
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+
+ do {
+ unsigned long long start = biosmap->addr;
+ unsigned long long size = biosmap->size;
+ unsigned long long end = start + size;
+ unsigned long type = biosmap->type;
+
+ /* Overflow in 64 bits? Ignore the memory map. */
+ if (start > end)
+ return -1;
+
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+ */
+ if (type == E820_RAM) {
+ if (start < 0x100000ULL && end > 0xA0000ULL) {
+ if (start < 0xA0000ULL)
+ add_memory_region(start, 0xA0000ULL-start, type);
+ if (end <= 0x100000ULL)
+ continue;
+ start = 0x100000ULL;
+ size = end - start;
+ }
+ }
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+ }
+ #endif
+
+ #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
+ struct edd edd;
+ #ifdef CONFIG_EDD_MODULE
+ EXPORT_SYMBOL(edd);
+ #endif
+ /**
+ * copy_edd() - Copy the BIOS EDD information
+ * from boot_params into a safe place.
+ *
+ */
+ static inline void copy_edd(void)
+ {
+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
+ edd.edd_info_nr = EDD_NR;
+ }
+ #else
+ static inline void copy_edd(void)
+ {
+ }
+ #endif
+
+ /*
+ * Do NOT EVER look at the BIOS memory size location.
+ * It does not work on many machines.
+ */
+ #define LOWMEMSIZE() (0x9f000)
+
+ static void __init parse_cmdline_early (char ** cmdline_p)
+ {
+ char c = ' ', *to = command_line, *from = saved_command_line;
+ int len = 0;
+ int userdef = 0;
+
+ memcpy(saved_command_line, xen_start_info.cmd_line, MAX_CMDLINE);
+ /* Save unparsed command line copy for /proc/cmdline */
+ saved_command_line[COMMAND_LINE_SIZE-1] = '\0';
+
+ for (;;) {
+ if (c != ' ')
+ goto next_char;
+ /*
+ * "mem=nopentium" disables the 4MB page tables.
+ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
+ * to <mem>, overriding the bios size.
+ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
+ * <start> to <start>+<mem>, overriding the bios size.
+ *
+ * HPA tells me bootloaders need to parse mem=, so no new
+ * option should be mem= [also see Documentation/i386/boot.txt]
+ */
+ if (!memcmp(from, "mem=", 4)) {
+ if (to != command_line)
+ to--;
+ if (!memcmp(from+4, "nopentium", 9)) {
+ from += 9+4;
+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
+ disable_pse = 1;
+ } else {
+ /* If the user specifies memory size, we
+ * limit the BIOS-provided memory map to
+ * that size. exactmap can be used to specify
+ * the exact map. mem=number can be used to
+ * trim the existing memory map.
+ */
+ unsigned long long mem_size;
+
+ mem_size = memparse(from+4, &from);
+ #if 0
+ limit_regions(mem_size);
+ userdef=1;
+ #else
+ xen_override_max_pfn =
+ (unsigned long)(mem_size>>PAGE_SHIFT);
+ #endif
+ }
+ }
+
+ else if (!memcmp(from, "memmap=", 7)) {
+ if (to != command_line)
+ to--;
+ if (!memcmp(from+7, "exactmap", 8)) {
+ from += 8+7;
+ e820.nr_map = 0;
+ userdef = 1;
+ } else {
+ /* If the user specifies memory size, we
+ * limit the BIOS-provided memory map to
+ * that size. exactmap can be used to specify
+ * the exact map. mem=number can be used to
+ * trim the existing memory map.
+ */
+ unsigned long long start_at, mem_size;
+
+ mem_size = memparse(from+7, &from);
+ if (*from == '@') {
+ start_at = memparse(from+1, &from);
+ add_memory_region(start_at, mem_size, E820_RAM);
+ } else if (*from == '#') {
+ start_at = memparse(from+1, &from);
+ add_memory_region(start_at, mem_size, E820_ACPI);
+ } else if (*from == '$') {
+ start_at = memparse(from+1, &from);
+ add_memory_region(start_at, mem_size, E820_RESERVED);
+ } else {
+ limit_regions(mem_size);
+ userdef=1;
+ }
+ }
+ }
+
+ else if (!memcmp(from, "noexec=", 7))
+ noexec_setup(from + 7);
+
+
+ #ifdef CONFIG_X86_SMP
+ /*
+ * If the BIOS enumerates physical processors before logical,
+ * maxcpus=N at enumeration-time can be used to disable HT.
+ */
+ else if (!memcmp(from, "maxcpus=", 8)) {
+ extern unsigned int maxcpus;
+
+ maxcpus = simple_strtoul(from + 8, NULL, 0);
+ }
+ #endif
+
+ #ifdef CONFIG_ACPI_BOOT
+ /* "acpi=off" disables both ACPI table parsing and interpreter */
+ else if (!memcmp(from, "acpi=off", 8)) {
+ disable_acpi();
+ }
+
+ /* acpi=force to over-ride black-list */
+ else if (!memcmp(from, "acpi=force", 10)) {
+ acpi_force = 1;
+ acpi_ht = 1;
+ acpi_disabled = 0;
+ }
+
+ /* acpi=strict disables out-of-spec workarounds */
+ else if (!memcmp(from, "acpi=strict", 11)) {
+ acpi_strict = 1;
+ }
+
+ /* Limit ACPI just to boot-time to enable HT */
+ else if (!memcmp(from, "acpi=ht", 7)) {
+ if (!acpi_force)
+ disable_acpi();
+ acpi_ht = 1;
+ }
+
+ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
+ else if (!memcmp(from, "pci=noacpi", 10)) {
+ acpi_disable_pci();
+ }
+ /* "acpi=noirq" disables ACPI interrupt routing */
+ else if (!memcmp(from, "acpi=noirq", 10)) {
+ acpi_noirq_set();
+ }
+
+ else if (!memcmp(from, "acpi_sci=edge", 13))
+ acpi_sci_flags.trigger = 1;
+
+ else if (!memcmp(from, "acpi_sci=level", 14))
+ acpi_sci_flags.trigger = 3;
+
+ else if (!memcmp(from, "acpi_sci=high", 13))
+ acpi_sci_flags.polarity = 1;
+
+ else if (!memcmp(from, "acpi_sci=low", 12))
+ acpi_sci_flags.polarity = 3;
+
+ #ifdef CONFIG_X86_IO_APIC
+ else if (!memcmp(from, "acpi_skip_timer_override", 24))
+ acpi_skip_timer_override = 1;
+ #endif
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ /* disable IO-APIC */
+ else if (!memcmp(from, "noapic", 6))
+ disable_ioapic_setup();
+ #endif /* CONFIG_X86_LOCAL_APIC */
+ #endif /* CONFIG_ACPI_BOOT */
+
+ /*
+ * highmem=size forces highmem to be exactly 'size' bytes.
+ * This works even on boxes that have no highmem otherwise.
+ * This also works to reduce highmem size on bigger boxes.
+ */
+ else if (!memcmp(from, "highmem=", 8))
+ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
+
+ /*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the
+ * vmalloc area - the default is 128m.
+ */
+ else if (!memcmp(from, "vmalloc=", 8))
+ __VMALLOC_RESERVE = memparse(from+8, &from);
+
+ next_char:
+ c = *(from++);
+ if (!c)
+ break;
+ if (COMMAND_LINE_SIZE <= ++len)
+ break;
+ *(to++) = c;
+ }
+ *to = '\0';
+ *cmdline_p = command_line;
+ if (userdef) {
+ printk(KERN_INFO "user-defined physical RAM map:\n");
+ print_memory_map("user");
+ }
+ }
+
+ #if 0 /* !XEN */
+ /*
+ * Callback for efi_memory_walk.
+ */
+ static int __init
+ efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
+ {
+ unsigned long *max_pfn = arg, pfn;
+
+ if (start < end) {
+ pfn = PFN_UP(end -1);
+ if (pfn > *max_pfn)
+ *max_pfn = pfn;
+ }
+ return 0;
+ }
+
+ /*
+ * Find the highest page frame number we have available
+ */
+ void __init find_max_pfn(void)
+ {
+ int i;
+
+ max_pfn = 0;
+ if (efi_enabled) {
+ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
+ return;
+ }
+
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long start, end;
+ /* RAM? */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ start = PFN_UP(e820.map[i].addr);
+ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+ if (start >= end)
+ continue;
+ if (end > max_pfn)
+ max_pfn = end;
+ }
+ }
+ #else
+ /* We don't use the fake e820 because we need to respond to user override. */
+ void __init find_max_pfn(void)
+ {
+ if ( xen_override_max_pfn < xen_start_info.nr_pages )
+ xen_override_max_pfn = xen_start_info.nr_pages;
+ max_pfn = xen_override_max_pfn;
+ }
+ #endif /* XEN */
+
+ /*
+ * Determine low and high memory ranges:
+ */
+ unsigned long __init find_max_low_pfn(void)
+ {
+ unsigned long max_low_pfn;
+
+ max_low_pfn = max_pfn;
+ if (max_low_pfn > MAXMEM_PFN) {
+ if (highmem_pages == -1)
+ highmem_pages = max_pfn - MAXMEM_PFN;
+ if (highmem_pages + MAXMEM_PFN < max_pfn)
+ max_pfn = MAXMEM_PFN + highmem_pages;
+ if (highmem_pages + MAXMEM_PFN > max_pfn) {
+ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
+ highmem_pages = 0;
+ }
+ max_low_pfn = MAXMEM_PFN;
+ #ifndef CONFIG_HIGHMEM
+ /* Maximum memory usable is what is directly addressable */
+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
+ MAXMEM>>20);
+ if (max_pfn > MAX_NONPAE_PFN)
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ else
+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+ max_pfn = MAXMEM_PFN;
+ #else /* !CONFIG_HIGHMEM */
+ #ifndef CONFIG_X86_PAE
+ if (max_pfn > MAX_NONPAE_PFN) {
+ max_pfn = MAX_NONPAE_PFN;
+ printk(KERN_WARNING "Warning only 4GB will be used.\n");
+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
+ }
+ #endif /* !CONFIG_X86_PAE */
+ #endif /* !CONFIG_HIGHMEM */
+ } else {
+ if (highmem_pages == -1)
+ highmem_pages = 0;
+ #ifdef CONFIG_HIGHMEM
+ if (highmem_pages >= max_pfn) {
+ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
+ highmem_pages = 0;
+ }
+ if (highmem_pages) {
+ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
+ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
+ highmem_pages = 0;
+ }
+ max_low_pfn -= highmem_pages;
+ }
+ #else
+ if (highmem_pages)
+ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
+ #endif
+ }
+ return max_low_pfn;
+ }
+
+ #ifndef CONFIG_DISCONTIGMEM
+
+ /*
+ * Free all available memory for boot time allocation. Used
+ * as a callback function by efi_memory_walk()
+ */
+
+ static int __init
+ free_available_memory(unsigned long start, unsigned long end, void *arg)
+ {
+ /* check max_low_pfn */
+ if (start >= ((max_low_pfn + 1) << PAGE_SHIFT))
+ return 0;
+ if (end >= ((max_low_pfn + 1) << PAGE_SHIFT))
+ end = (max_low_pfn + 1) << PAGE_SHIFT;
+ if (start < end)
+ free_bootmem(start, end - start);
+
+ return 0;
+ }
+ /*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+ static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
+ {
+ int i;
+
+ if (efi_enabled) {
+ efi_memmap_walk(free_available_memory, NULL);
+ return;
+ }
+ for (i = 0; i < e820.nr_map; i++) {
+ unsigned long curr_pfn, last_pfn, size;
+ /*
+ * Reserve usable low memory
+ */
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ /*
+ * We are rounding up the start address of usable memory:
+ */
+ curr_pfn = PFN_UP(e820.map[i].addr);
+ if (curr_pfn >= max_low_pfn)
+ continue;
+ /*
+ * ... and at the end of the usable range downwards:
+ */
+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
+ if (last_pfn > max_low_pfn)
+ last_pfn = max_low_pfn;
+
+ /*
+ * .. finally, did all the rounding and playing
+ * around just make the area go away?
+ */
+ if (last_pfn <= curr_pfn)
+ continue;
+
+ size = last_pfn - curr_pfn;
+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+ }
+ }
+
+ /*
+ * workaround for Dell systems that neglect to reserve EBDA
+ */
+ static void __init reserve_ebda_region(void)
+ {
+ unsigned int addr;
+ addr = get_bios_ebda();
+ if (addr)
+ reserve_bootmem(addr, PAGE_SIZE);
+ }
+
+ static unsigned long __init setup_memory(void)
+ {
+ unsigned long bootmap_size, start_pfn, max_low_pfn;
+
+ /*
+ * partially used pages are not usable - thus
+ * we are rounding upwards:
+ */
+ start_pfn = PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames;
+
+ find_max_pfn();
+
+ max_low_pfn = find_max_low_pfn();
+
+ #ifdef CONFIG_HIGHMEM
+ highstart_pfn = highend_pfn = max_pfn;
+ if (max_pfn > max_low_pfn) {
+ highstart_pfn = max_low_pfn;
+ }
+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+ pages_to_mb(highend_pfn - highstart_pfn));
+ #endif
+ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
+ pages_to_mb(max_low_pfn));
+ /*
+ * Initialize the boot-time allocator (with low memory only):
+ */
+ bootmap_size = init_bootmem(start_pfn, max_low_pfn);
+
+ register_bootmem_low_pages(max_low_pfn);
+
+ /*
+ * Reserve the bootmem bitmap itself as well. We do this in two
+ * steps (first step was init_bootmem()) because this catches
+ * the (very unlikely) case of us accidentally initializing the
+ * bootmem allocator with an invalid RAM area.
+ */
+ reserve_bootmem(HIGH_MEMORY, (PFN_PHYS(start_pfn) +
+ bootmap_size + PAGE_SIZE-1) - (HIGH_MEMORY));
+
+ /* reserve EBDA region, it's a 4K region */
+ reserve_ebda_region();
+
+ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
+ PCI prefetch into it (errata #56). Usually the page is reserved anyways,
+ unless you have no PS/2 mouse plugged in. */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 == 6)
+ reserve_bootmem(0xa0000 - 4096, 4096);
+
+ #ifdef CONFIG_SMP
+ /*
+ * But first pinch a few for the stack/trampoline stuff
+ * FIXME: Don't need the extra page at 4K, but need to fix
+ * trampoline before removing it. (see the GDT stuff)
+ */
+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
+ #endif
+ #ifdef CONFIG_ACPI_SLEEP
+ /*
+ * Reserve low memory region for sleep support.
+ */
+ acpi_reserve_bootmem();
+ #endif
+ #ifdef CONFIG_X86_FIND_SMP_CONFIG
+ /*
+ * Find and reserve possible boot-time SMP configuration:
+ */
+ find_smp_config();
+ #endif
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (xen_start_info.mod_start) {
+ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
+ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
+ initrd_start = INITRD_START + PAGE_OFFSET;
+ initrd_end = initrd_start+INITRD_SIZE;
+ initrd_below_start_ok = 1;
+ }
+ else {
+ printk(KERN_ERR "initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
+ INITRD_START + INITRD_SIZE,
+ max_low_pfn << PAGE_SHIFT);
+ initrd_start = 0;
+ }
+ }
+ #endif
+
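++ /* The initial phys->machine translation table is supplied by Xen in
++  * mfn_list; it is re-allocated later in setup_arch() if max_pfn grows
++  * beyond the number of pages Xen gave us. */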
++ phys_to_machine_mapping = (unsigned int *)xen_start_info.mfn_list;
+
+ return max_low_pfn;
+ }
+ #else
+ extern unsigned long setup_memory(void);
+ #endif /* !CONFIG_DISCONTIGMEM */
+
+ /*
+ * Request address space for all standard RAM and ROM resources
+ * and also for regions reported as reserved by the e820.
+ */
+ static void __init
+ legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+ {
+ int i;
+
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ probe_roms();
+ #endif
+ for (i = 0; i < e820.nr_map; i++) {
+ struct resource *res;
+ if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
+ continue;
+ res = alloc_bootmem_low(sizeof(struct resource));
+ switch (e820.map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+ res->start = e820.map[i].addr;
+ res->end = res->start + e820.map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ request_resource(&iomem_resource, res);
+ if (e820.map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, code_resource);
+ request_resource(res, data_resource);
+ }
+ }
+ }
+
+ /*
+ * Request address space for all standard resources
+ */
+ static void __init register_memory(void)
+ {
+ unsigned long gapstart, gapsize;
+ unsigned long long last;
+ int i;
+
+ if (efi_enabled)
+ efi_initialize_iomem_resources(&code_resource, &data_resource);
+ else
+ legacy_init_iomem_resources(&code_resource, &data_resource);
+
+ /* EFI systems may still have VGA */
+ request_resource(&iomem_resource, &video_ram_resource);
+
+ /* request I/O space for devices used on all i[345]86 PCs */
+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
+ request_resource(&ioport_resource, &standard_io_resources[i]);
+
+ /*
+ * Search for the biggest gap in the low 32 bits of the e820
+ * memory space.
+ */
+ last = 0x100000000ull;
+ gapstart = 0x10000000;
+ gapsize = 0x400000;
+ i = e820.nr_map;
+ while (--i >= 0) {
+ unsigned long long start = e820.map[i].addr;
+ unsigned long long end = start + e820.map[i].size;
+
+ /*
+ * Since "last" is at most 4GB, we know we'll
+ * fit in 32 bits if this condition is true
+ */
+ if (last > end) {
+ unsigned long gap = last - end;
+
+ if (gap > gapsize) {
+ gapsize = gap;
+ gapstart = end;
+ }
+ }
+ if (start < last)
+ last = start;
+ }
+
+ /*
+ * Start allocating dynamic PCI memory a bit into the gap,
+ * aligned up to the nearest megabyte.
+ *
+ * Question: should we try to pad it up a bit (do something
+ * like " + (gapsize >> 3)" in there too?). We now have the
+ * technology.
+ */
+ pci_mem_start = (gapstart + 0xfffff) & ~0xfffff;
+
+ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
+ pci_mem_start, gapstart, gapsize);
+ }
+
+ /* Use inline assembly to define this because the nops are defined
+ as inline assembly strings in the include files and we cannot
+ get them easily into strings. */
+ asm("\t.data\nintelnops: "
+ GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
+ GENERIC_NOP7 GENERIC_NOP8);
+ asm("\t.data\nk8nops: "
+ K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
+ K8_NOP7 K8_NOP8);
+ asm("\t.data\nk7nops: "
+ K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
+ K7_NOP7 K7_NOP8);
+
+ extern unsigned char intelnops[], k8nops[], k7nops[];
+ static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ intelnops,
+ intelnops + 1,
+ intelnops + 1 + 2,
+ intelnops + 1 + 2 + 3,
+ intelnops + 1 + 2 + 3 + 4,
+ intelnops + 1 + 2 + 3 + 4 + 5,
+ intelnops + 1 + 2 + 3 + 4 + 5 + 6,
+ intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+ };
+ static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k8nops,
+ k8nops + 1,
+ k8nops + 1 + 2,
+ k8nops + 1 + 2 + 3,
+ k8nops + 1 + 2 + 3 + 4,
+ k8nops + 1 + 2 + 3 + 4 + 5,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+ };
+ static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
+ NULL,
+ k7nops,
+ k7nops + 1,
+ k7nops + 1 + 2,
+ k7nops + 1 + 2 + 3,
+ k7nops + 1 + 2 + 3 + 4,
+ k7nops + 1 + 2 + 3 + 4 + 5,
+ k7nops + 1 + 2 + 3 + 4 + 5 + 6,
+ k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
+ };
+ static struct nop {
+ int cpuid;
+ unsigned char **noptable;
+ } noptypes[] = {
+ { X86_FEATURE_K8, k8_nops },
+ { X86_FEATURE_K7, k7_nops },
+ { -1, NULL }
+ };
+
+ /* Replace instructions with better alternatives for this CPU type.
+
+ This runs before SMP is initialized to avoid SMP problems with
+ self-modifying code. This implies that asymmetric systems where
+ APs have fewer capabilities than the boot processor are not handled.
+ In this case boot with "noreplacement". */
+ void apply_alternatives(void *start, void *end)
+ {
+ struct alt_instr *a;
+ int diff, i, k;
+ unsigned char **noptable = intel_nops;
+ for (i = 0; noptypes[i].cpuid >= 0; i++) {
+ if (boot_cpu_has(noptypes[i].cpuid)) {
+ noptable = noptypes[i].noptable;
+ break;
+ }
+ }
+ for (a = start; (void *)a < end; a++) {
+ if (!boot_cpu_has(a->cpuid))
+ continue;
+ BUG_ON(a->replacementlen > a->instrlen);
+ memcpy(a->instr, a->replacement, a->replacementlen);
+ diff = a->instrlen - a->replacementlen;
+ /* Pad the rest with nops */
+ for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
+ k = diff;
+ if (k > ASM_NOP_MAX)
+ k = ASM_NOP_MAX;
+ memcpy(a->instr + i, noptable[k], k);
+ }
+ }
+ }
+
+ static int no_replacement __initdata = 0;
+
+ void __init alternative_instructions(void)
+ {
+ extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+ if (no_replacement)
+ return;
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+ }
+
+ static int __init noreplacement_setup(char *s)
+ {
+ no_replacement = 1;
+ return 0;
+ }
+
+ __setup("noreplacement", noreplacement_setup);
+
+ static char * __init machine_specific_memory_setup(void);
+
+ #ifdef CONFIG_MCA
+ static void set_mca_bus(int x)
+ {
+ MCA_bus = x;
+ }
+ #else
+ static void set_mca_bus(int x) { }
+ #endif
+
+ /*
+ * Determine if we were loaded by an EFI loader. If so, then we have also been
+ * passed the efi memmap, systab, etc., so we should use these data structures
+ * for initialization. Note, the efi init code path is determined by the
+ * global efi_enabled. This allows the same kernel image to be used on existing
+ * systems (with a traditional BIOS) as well as on EFI systems.
+ */
+ void __init setup_arch(char **cmdline_p)
+ {
+ int i,j;
+
+ unsigned long max_low_pfn;
+
+ /* Force a quick death if the kernel panics. */
+ extern int panic_timeout;
+ if ( panic_timeout == 0 )
+ panic_timeout = 1;
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_4gb_segments);
+
+ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+ early_cpu_init();
+
+ /*
+ * FIXME: This isn't an official loader_type right
+ * now but does currently work with elilo.
+ * If we were configured as an EFI kernel, check to make
+ * sure that we were loaded correctly from elilo and that
+ * the system table is valid. If not, then initialize normally.
+ */
+ #ifdef CONFIG_EFI
+ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
+ efi_enabled = 1;
+ #endif
+
+ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
+ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
+ */
+ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
+ drive_info = DRIVE_INFO;
+ screen_info = SCREEN_INFO;
+ edid_info = EDID_INFO;
+ apm_info.bios = APM_BIOS_INFO;
+ ist_info = IST_INFO;
+ saved_videomode = VIDEO_MODE;
+ if( SYS_DESC_TABLE.length != 0 ) {
+ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
+ machine_id = SYS_DESC_TABLE.table[0];
+ machine_submodel_id = SYS_DESC_TABLE.table[1];
+ BIOS_revision = SYS_DESC_TABLE.table[2];
+ }
+ aux_device_present = AUX_DEVICE_INFO;
+ bootloader_type = LOADER_TYPE;
+
+ #ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* This is drawn from a dump from vgacon:startup in standard Linux. */
+ screen_info.orig_video_mode = 3;
+ screen_info.orig_video_isVGA = 1;
+ screen_info.orig_video_lines = 25;
+ screen_info.orig_video_cols = 80;
+ screen_info.orig_video_ega_bx = 3;
+ screen_info.orig_video_points = 16;
+ #endif
+
+ #ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+ #endif
+ ARCH_SETUP
+ if (efi_enabled)
+ efi_init();
+ else {
+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
+ print_memory_map(machine_specific_memory_setup());
+ }
+
+ copy_edd();
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+ init_mm.start_code = (unsigned long) _text;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
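+ /* XEN: the kernel brk starts just above the initial page tables
+  * supplied by the domain builder. */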
+ init_mm.brk = (PFN_UP(__pa(xen_start_info.pt_base)) + xen_start_info.nr_pt_frames) << PAGE_SHIFT;
+
+ /* XEN: This is nonsense: kernel may not even be contiguous in RAM. */
+ /*code_resource.start = virt_to_phys(_text);*/
+ /*code_resource.end = virt_to_phys(_etext)-1;*/
+ /*data_resource.start = virt_to_phys(_etext);*/
+ /*data_resource.end = virt_to_phys(_edata)-1;*/
+
+ parse_cmdline_early(cmdline_p);
+
+ max_low_pfn = setup_memory();
+
+ /*
+ * NOTE: before this point _nobody_ is allowed to allocate
+ * any memory using the bootmem allocator. Although the
+ * allocator is now initialised, only the first 8Mb of the kernel
+ * virtual address space has been mapped. All allocations before
+ * paging_init() has completed must use the alloc_bootmem_low_pages()
+ * variant (which allocates DMA'able memory) and care must be taken
+ * not to exceed the 8Mb limit.
+ */
+
+ #ifdef CONFIG_SMP
+ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
+ #endif
+ paging_init();
+
+ /* Make sure we have a large enough P->M table. */
+ if (max_pfn > xen_start_info.nr_pages) {
+ phys_to_machine_mapping = alloc_bootmem_low_pages(
+ max_pfn * sizeof(unsigned long));
+ memset(phys_to_machine_mapping, ~0,
+ max_pfn * sizeof(unsigned long));
+ memcpy(phys_to_machine_mapping,
+ (unsigned long *)xen_start_info.mfn_list,
+ xen_start_info.nr_pages * sizeof(unsigned long));
+ free_bootmem(
+ __pa(xen_start_info.mfn_list),
+ PFN_PHYS(PFN_UP(xen_start_info.nr_pages *
+ sizeof(unsigned long))));
+ }
+
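+ /* Record the machine frames backing the P->M table and publish the
+  * list via shared_info so that Xen can locate the table. */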
+ pfn_to_mfn_frame_list = alloc_bootmem_low_pages(PAGE_SIZE);
+ for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ {
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+
+ /*
+ * NOTE: at this point the bootmem allocator is fully available.
+ */
+
+ #ifdef CONFIG_EARLY_PRINTK
+ {
+ char *s = strstr(*cmdline_p, "earlyprintk=");
+ if (s) {
+ extern void setup_early_printk(char *);
+
+ setup_early_printk(s);
+ printk("early console enabled\n");
+ }
+ }
+ #endif
+
+
+ dmi_scan_machine();
+
+ #ifdef CONFIG_X86_GENERICARCH
+ generic_apic_probe(*cmdline_p);
+ #endif
+ if (efi_enabled)
+ efi_map_memmap();
+
+ /*
+ * Parse the ACPI tables for possible boot-time SMP configuration.
+ */
+ acpi_boot_table_init();
+ acpi_boot_init();
+
+ #ifdef CONFIG_X86_LOCAL_APIC
+ if (smp_found_config)
+ get_smp_config();
+ #endif
+
+ /* XXX Disable irqdebug until we have a way to avoid interrupt
+ * conflicts. */
+ noirqdebug_setup("");
+
+ register_memory();
+
+ /* If we are a privileged guest OS then we should request IO privs. */
+ if (xen_start_info.flags & SIF_PRIVILEGED) {
+ dom0_op_t op;
+ op.cmd = DOM0_IOPL;
+ op.u.iopl.domain = DOMID_SELF;
+ op.u.iopl.iopl = 1;
+ if (HYPERVISOR_dom0_op(&op) != 0)
+ panic("Unable to obtain IOPL, despite SIF_PRIVILEGED");
+ current->thread.io_pl = 1;
+ }
+
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ if (!(xen_start_info.flags & SIF_PRIVILEGED))
+ panic("Xen granted us console access "
+ "but not privileged status");
+
+ #ifdef CONFIG_VT
+ #if defined(CONFIG_VGA_CONSOLE)
+ if (!efi_enabled ||
+ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
+ conswitchp = &vga_con;
+ #elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+ #endif
+ #endif
+ } else {
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ extern const struct consw xennull_con;
+ extern int console_use_vt;
+ #if defined(CONFIG_VGA_CONSOLE)
+ /* disable VGA driver */
+ ORIG_VIDEO_ISVGA = VIDEO_TYPE_VLFB;
+ #endif
+ conswitchp = &xennull_con;
+ console_use_vt = 0;
+ #endif
+ }
+ }
+
+ #include "setup_arch_post.h"
+ /*
+ * Local Variables:
+ * mode:c
+ * c-file-style:"k&r"
+ * c-basic-offset:8
+ * End:
+ */
--- /dev/null
-extern u64 processed_system_time;
+ /*
+ * linux/arch/i386/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the PC-specific time handling details:
+ * reading the RTC at bootup, etc..
+ * 1994-07-02 Alan Modra
+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
+ * 1995-03-26 Markus Kuhn
+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
+ * precision CMOS clock update
+ * 1996-05-03 Ingo Molnar
+ * fixed time warps in do_[slow|fast]_gettimeoffset()
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ * 1998-09-05 (Various)
+ * More robust do_fast_gettimeoffset() algorithm implemented
+ * (works with APM, Cyrix 6x86MX and Centaur C6),
+ * monotonic gettimeofday() with fast_get_timeoffset(),
+ * drift-proof precision TSC calibration on boot
+ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
+ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
+ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
+ * 1998-12-16 Andrea Arcangeli
+ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
+ * because was not accounting lost_ticks.
+ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
+ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
+ * serialize accesses to xtime/lost_ticks).
+ */
+
+ #include <linux/errno.h>
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/param.h>
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
+ #include <linux/time.h>
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/smp.h>
+ #include <linux/module.h>
+ #include <linux/sysdev.h>
+ #include <linux/bcd.h>
+ #include <linux/efi.h>
+ #include <linux/mca.h>
+ #include <linux/sysctl.h>
++#include <linux/percpu.h>
+
+ #include <asm/io.h>
+ #include <asm/smp.h>
+ #include <asm/irq.h>
+ #include <asm/msr.h>
+ #include <asm/delay.h>
+ #include <asm/mpspec.h>
+ #include <asm/uaccess.h>
+ #include <asm/processor.h>
+ #include <asm/timer.h>
+
+ #include "mach_time.h"
+
+ #include <linux/timex.h>
+ #include <linux/config.h>
+
+ #include <asm/hpet.h>
+
+ #include <asm/arch_hooks.h>
+
+ #include "io_ports.h"
+
+ extern spinlock_t i8259A_lock;
+ int pit_latch_buggy; /* extern */
+
+ u64 jiffies_64 = INITIAL_JIFFIES;
+
+ EXPORT_SYMBOL(jiffies_64);
+
+ unsigned long cpu_khz; /* Detected as we calibrate the TSC */
+
+ extern unsigned long wall_jiffies;
+
+ DEFINE_SPINLOCK(rtc_lock);
+
+ DEFINE_SPINLOCK(i8253_lock);
+ EXPORT_SYMBOL(i8253_lock);
+
+ extern struct init_timer_opts timer_tsc_init;
+ extern struct timer_opts timer_tsc;
+ struct timer_opts *cur_timer = &timer_tsc;
+
+ /* These are periodically updated in shared_info, and then copied here. */
+ u32 shadow_tsc_stamp;
+ u64 shadow_system_time;
+ static u32 shadow_time_version;
+ static struct timeval shadow_tv;
-#ifndef CONFIG_SMP
+
+ /*
+ * We use this to ensure that gettimeofday() is monotonically increasing. We
+ * only break this guarantee if the wall clock jumps backwards "a long way".
+ */
+ static struct timeval last_seen_tv = {0,0};
+
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ /* Periodically propagate synchronised time base to the RTC and to Xen. */
+ static long last_rtc_update, last_update_to_xen;
+ #endif
+
+ /* Periodically take synchronised time base from Xen, if we need it. */
+ static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
+
+ /* Keep track of last time we did processing/updating of jiffies and xtime. */
+ u64 processed_system_time; /* System time (ns) at last processing. */
++DEFINE_PER_CPU(u64, processed_system_time);
+
+ #define NS_PER_TICK (1000000000ULL/HZ)
+
+ #define HANDLE_USEC_UNDERFLOW(_tv) do { \
+ while ((_tv).tv_usec < 0) { \
+ (_tv).tv_usec += USEC_PER_SEC; \
+ (_tv).tv_sec--; \
+ } \
+ } while (0)
+ #define HANDLE_USEC_OVERFLOW(_tv) do { \
+ while ((_tv).tv_usec >= USEC_PER_SEC) { \
+ (_tv).tv_usec -= USEC_PER_SEC; \
+ (_tv).tv_sec++; \
+ } \
+ } while (0)
+ static inline void __normalize_time(time_t *sec, s64 *nsec)
+ {
+ while (*nsec >= NSEC_PER_SEC) {
+ (*nsec) -= NSEC_PER_SEC;
+ (*sec)++;
+ }
+ while (*nsec < 0) {
+ (*nsec) += NSEC_PER_SEC;
+ (*sec)--;
+ }
+ }
+
+ /* Does this guest OS track Xen time, or set its wall clock independently? */
+ static int independent_wallclock = 0;
+ static int __init __independent_wallclock(char *str)
+ {
+ independent_wallclock = 1;
+ return 1;
+ }
+ __setup("independent_wallclock", __independent_wallclock);
+ #define INDEPENDENT_WALLCLOCK() \
+ (independent_wallclock || (xen_start_info.flags & SIF_INITDOMAIN))
+
+ /*
+ * Reads a consistent set of time-base values from Xen, into a shadow data
+ * area. Must be called with the xtime_lock held for writing.
+ */
+ static void __get_time_values_from_xen(void)
+ {
+ shared_info_t *s = HYPERVISOR_shared_info;
+
+ do {
+ shadow_time_version = s->time_version2;
+ rmb();
+ shadow_tv.tv_sec = s->wc_sec;
+ shadow_tv.tv_usec = s->wc_usec;
+ shadow_tsc_stamp = (u32)s->tsc_timestamp;
+ shadow_system_time = s->system_time;
+ rmb();
+ }
+ while (shadow_time_version != s->time_version1);
+
+ cur_timer->mark_offset();
+ }
+
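+ /* Nonzero iff our shadow copies still match Xen's live time values. */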
+ #define TIME_VALUES_UP_TO_DATE \
+ ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
+
+ /*
+ * This version of gettimeofday has microsecond resolution
+ * and better than microsecond precision on fast x86 machines with TSC.
+ */
+ void do_gettimeofday(struct timeval *tv)
+ {
+ unsigned long seq;
+ unsigned long usec, sec;
+ unsigned long max_ntp_tick;
+ unsigned long flags;
+ s64 nsec;
+
+ do {
+ unsigned long lost;
+
+ seq = read_seqbegin(&xtime_lock);
+
+ usec = cur_timer->get_offset();
+ lost = jiffies - wall_jiffies;
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+ * so make sure not to go into next possible interval.
+ * Better to lose some accuracy than have time go backwards..
+ */
+ if (unlikely(time_adjust < 0)) {
+ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
+ usec = min(usec, max_ntp_tick);
+
+ if (lost)
+ usec += lost * max_ntp_tick;
+ }
+ else if (unlikely(lost))
+ usec += lost * (USEC_PER_SEC / HZ);
+
+ sec = xtime.tv_sec;
+ usec += (xtime.tv_nsec / NSEC_PER_USEC);
+
+ nsec = shadow_system_time - processed_system_time;
+ __normalize_time(&sec, &nsec);
+ usec += (long)nsec / NSEC_PER_USEC;
+
+ if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+ /*
+ * We may have blocked for a long time,
+ * rendering our calculations invalid
+ * (e.g. the time delta may have
+ * overflowed). Detect that and recalculate
+ * with fresh values.
+ */
+ write_seqlock_irqsave(&xtime_lock, flags);
+ __get_time_values_from_xen();
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ continue;
+ }
+ } while (read_seqretry(&xtime_lock, seq));
+
+ while (usec >= USEC_PER_SEC) {
+ usec -= USEC_PER_SEC;
+ sec++;
+ }
+
+ /* Ensure that time-of-day is monotonically increasing. */
+ if ((sec < last_seen_tv.tv_sec) ||
+ ((sec == last_seen_tv.tv_sec) && (usec < last_seen_tv.tv_usec))) {
+ sec = last_seen_tv.tv_sec;
+ usec = last_seen_tv.tv_usec;
+ } else {
+ last_seen_tv.tv_sec = sec;
+ last_seen_tv.tv_usec = usec;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+ }
+
+ EXPORT_SYMBOL(do_gettimeofday);
+
+ int do_settimeofday(struct timespec *tv)
+ {
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec;
+ s64 nsec;
+ struct timespec xentime;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ if (!INDEPENDENT_WALLCLOCK())
+ return 0; /* Silent failure? */
+
+ write_seqlock_irq(&xtime_lock);
+
+ /*
+ * If we are blocked for a long time our time delta may overflow and
+ * our shadow time values become stale. Detect that case and retry
+ * with fresh values.
+ */
+ again:
+ nsec = (s64)tv->tv_nsec -
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC);
+ if (unlikely(!TIME_VALUES_UP_TO_DATE)) {
+ __get_time_values_from_xen();
+ goto again;
+ }
+
+ __normalize_time(&sec, &nsec);
+ set_normalized_timespec(&xentime, sec, nsec);
+
+ /*
+ * This is revolting. We need to set "xtime" correctly. However, the
+ * value in this location is the value at the most recent update of
+ * wall time. Discover what correction gettimeofday() would have
+ * made, and then undo it!
+ */
+ nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
+
+ nsec -= (shadow_system_time - processed_system_time);
+
+ __normalize_time(&sec, &nsec);
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+
+ /* Reset all our running time counts. They make no sense now. */
+ last_seen_tv.tv_sec = 0;
+ last_update_from_xen = 0;
+
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (xen_start_info.flags & SIF_INITDOMAIN) {
+ dom0_op_t op;
+ last_rtc_update = last_update_to_xen = 0;
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = xentime.tv_sec;
+ op.u.settime.usecs = xentime.tv_nsec / NSEC_PER_USEC;
+ op.u.settime.system_time = shadow_system_time;
+ write_sequnlock_irq(&xtime_lock);
+ HYPERVISOR_dom0_op(&op);
+ } else
+ #endif
+ write_sequnlock_irq(&xtime_lock);
+
+ clock_was_set();
+ return 0;
+ }
+
+ EXPORT_SYMBOL(do_settimeofday);
+
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ static int set_rtc_mmss(unsigned long nowtime)
+ {
+ int retval;
+
+ /* gets recalled with irq locally disabled */
+ spin_lock(&rtc_lock);
+ if (efi_enabled)
+ retval = efi_set_rtc_mmss(nowtime);
+ else
+ retval = mach_set_rtc_mmss(nowtime);
+ spin_unlock(&rtc_lock);
+
+ return retval;
+ }
+ #endif
+
+ /* monotonic_clock(): returns # of nanoseconds passed since time_init()
+ * Note: This function is required to return accurate
+ * time even in the absence of multiple timer ticks.
+ */
+ unsigned long long monotonic_clock(void)
+ {
+ return cur_timer->monotonic_clock();
+ }
+ EXPORT_SYMBOL(monotonic_clock);
+
+ #if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
+ unsigned long profile_pc(struct pt_regs *regs)
+ {
+ unsigned long pc = instruction_pointer(regs);
+
+ if (in_lock_functions(pc))
+ return *(unsigned long *)(regs->ebp + 4);
+
+ return pc;
+ }
+ EXPORT_SYMBOL(profile_pc);
+ #endif
+
+ /*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+ static inline void do_timer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+ {
+ time_t wtm_sec, sec;
+ s64 delta, nsec;
+ long sec_diff, wtm_nsec;
+
+ do {
+ __get_time_values_from_xen();
+
+ delta = (s64)(shadow_system_time +
+ ((s64)cur_timer->get_offset() *
+ (s64)NSEC_PER_USEC) -
+ processed_system_time);
+ }
+ while (!TIME_VALUES_UP_TO_DATE);
+
+ if (unlikely(delta < 0)) {
+ printk("Timer ISR: Time went backwards: %lld %lld %lld %lld\n",
+ delta, shadow_system_time,
+ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
+ processed_system_time);
+ return;
+ }
+
+ /* Process elapsed jiffies since last call. */
+ while (delta >= NS_PER_TICK) {
+ delta -= NS_PER_TICK;
+ processed_system_time += NS_PER_TICK;
+ do_timer(regs);
-#endif
+ update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ if (regs)
++ profile_tick(CPU_PROFILING, regs);
+ }
+
+ /*
+ * Take synchronised time from Xen once a minute if we're not
+ * synchronised ourselves, and we haven't chosen to keep an independent
+ * time base.
+ */
+ if (!INDEPENDENT_WALLCLOCK() &&
+ ((time_status & STA_UNSYNC) != 0) &&
+ (xtime.tv_sec > (last_update_from_xen + 60))) {
+ /* Adjust shadow for jiffies that haven't updated xtime yet. */
+ shadow_tv.tv_usec -=
+ (jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
+ HANDLE_USEC_UNDERFLOW(shadow_tv);
+
+ /*
+ * Reset our running time counts if they are invalidated by
+ * a warp backwards of more than 500ms.
+ */
+ sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
+ if (unlikely(abs(sec_diff) > 1) ||
+ unlikely(((sec_diff * USEC_PER_SEC) +
+ (xtime.tv_nsec / NSEC_PER_USEC) -
+ shadow_tv.tv_usec) > 500000)) {
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ last_rtc_update = last_update_to_xen = 0;
+ #endif
+ last_seen_tv.tv_sec = 0;
+ }
+
+ /* Update our unsynchronised xtime appropriately. */
+ sec = shadow_tv.tv_sec;
+ nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+
+ __normalize_time(&sec, &nsec);
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ last_update_from_xen = sec;
+ }
+
+ #ifdef CONFIG_XEN_PRIVILEGED_GUEST
+ if (!(xen_start_info.flags & SIF_INITDOMAIN))
+ return;
+
+ /* Send synchronised time to Xen approximately every minute. */
+ if (((time_status & STA_UNSYNC) == 0) &&
+ (xtime.tv_sec > (last_update_to_xen + 60))) {
+ dom0_op_t op;
+ struct timeval tv;
+
+ tv.tv_sec = xtime.tv_sec;
+ tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
+ tv.tv_usec += (jiffies - wall_jiffies) * (USEC_PER_SEC/HZ);
+ HANDLE_USEC_OVERFLOW(tv);
+
+ op.cmd = DOM0_SETTIME;
+ op.u.settime.secs = tv.tv_sec;
+ op.u.settime.usecs = tv.tv_usec;
+ op.u.settime.system_time = shadow_system_time;
+ HYPERVISOR_dom0_op(&op);
+
+ last_update_to_xen = xtime.tv_sec;
+ }
+
+ /*
+ * If we have an externally synchronized Linux clock, then update
+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+ * called as close as possible to 500 ms before the new second starts.
+ */
+ if ((time_status & STA_UNSYNC) == 0 &&
+ xtime.tv_sec > last_rtc_update + 660 &&
+ (xtime.tv_nsec / 1000)
+ >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+ (xtime.tv_nsec / 1000)
+ <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2) {
+ /* horrible...FIXME */
+ if (efi_enabled) {
+ if (efi_set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600;
+ } else if (set_rtc_mmss(xtime.tv_sec) == 0)
+ last_rtc_update = xtime.tv_sec;
+ else
+ last_rtc_update = xtime.tv_sec - 600; /* do it again in 60 s */
+ }
+ #endif
+ }
+
+ /*
+ * This is the same as the above, except we _also_ save the current
+ * Time Stamp Counter value at the time of the timer interrupt, so that
+ * we later on can estimate the time of day more exactly.
+ */
+ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ /*
+ * Here we are in the timer irq handler. We just have irqs locally
+ * disabled but we don't know if the timer_bh is running on the other
+ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
+ * the irq version of write_lock because as just said we have irq
+ * locally disabled. -arca
+ */
+ write_seqlock(&xtime_lock);
+ do_timer_interrupt(irq, NULL, regs);
+ write_sequnlock(&xtime_lock);
+ return IRQ_HANDLED;
+ }
+
+ /* not static: needed by APM */
+ unsigned long get_cmos_time(void)
+ {
+ unsigned long retval;
+
+ spin_lock(&rtc_lock);
+
+ if (efi_enabled)
+ retval = efi_get_time();
+ else
+ retval = mach_get_cmos_time();
+
+ spin_unlock(&rtc_lock);
+
+ return retval;
+ }
+
+ static long clock_cmos_diff, sleep_start;
+
+ static int timer_suspend(struct sys_device *dev, u32 state)
+ {
+ /*
+ * Estimate time zone so that set_time can update the clock
+ */
+ clock_cmos_diff = -get_cmos_time();
+ clock_cmos_diff += get_seconds();
+ sleep_start = get_cmos_time();
+ return 0;
+ }
+
+ static int timer_resume(struct sys_device *dev)
+ {
+ unsigned long flags;
+ unsigned long sec;
+ unsigned long sleep_length;
+
+ #ifdef CONFIG_HPET_TIMER
+ if (is_hpet_enabled())
+ hpet_reenable();
+ #endif
+ sec = get_cmos_time() + clock_cmos_diff;
+ sleep_length = (get_cmos_time() - sleep_start) * HZ;
+ write_seqlock_irqsave(&xtime_lock, flags);
+ xtime.tv_sec = sec;
+ xtime.tv_nsec = 0;
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ jiffies += sleep_length;
+ wall_jiffies += sleep_length;
+ return 0;
+ }
+
+ static struct sysdev_class timer_sysclass = {
+ .resume = timer_resume,
+ .suspend = timer_suspend,
+ set_kset_name("timer"),
+ };
+
+
+ /* XXX this driverfs stuff should probably go elsewhere later -john */
+ static struct sys_device device_timer = {
+ .id = 0,
+ .cls = &timer_sysclass,
+ };
+
+ static int time_init_device(void)
+ {
+ int error = sysdev_class_register(&timer_sysclass);
+ if (!error)
+ error = sysdev_register(&device_timer);
+ return error;
+ }
+
+ device_initcall(time_init_device);
+
+ #ifdef CONFIG_HPET_TIMER
+ extern void (*late_time_init)(void);
+ /* Duplicate of time_init() below, with hpet_enable part added */
+ void __init hpet_time_init(void)
+ {
+ xtime.tv_sec = get_cmos_time();
+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+
+ if (hpet_enable() >= 0) {
+ printk("Using HPET for base-timer\n");
+ }
+
+ cur_timer = select_timer();
+ printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
+ time_init_hook();
+ }
+ #endif
+
+ /* Dynamically-mapped IRQ. */
+ static int TIMER_IRQ;
+
+ static struct irqaction irq_timer = {
+ timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
+ NULL, NULL
+ };
+
+ void __init time_init(void)
+ {
+ #ifdef CONFIG_HPET_TIMER
+ if (is_hpet_capable()) {
+ /*
+ * HPET initialization needs to do memory-mapped io. So, let
+ * us do a late initialization after mem_init().
+ */
+ late_time_init = hpet_time_init;
+ return;
+ }
+ #endif
+ __get_time_values_from_xen();
+ xtime.tv_sec = shadow_tv.tv_sec;
+ xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+ set_normalized_timespec(&wall_to_monotonic,
+ -xtime.tv_sec, -xtime.tv_nsec);
+ processed_system_time = shadow_system_time;
+
+ if (timer_tsc_init.init(NULL) != 0)
+ BUG();
+ printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
+
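+ /* Timer ticks arrive as a Xen virtual IRQ (event channel), not via the PIT/APIC. */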
+ TIMER_IRQ = bind_virq_to_irq(VIRQ_TIMER);
+
+ (void)setup_irq(TIMER_IRQ, &irq_timer);
+ }
+
+ /* Convert jiffies to system time. Call with xtime_lock held for reading. */
+ static inline u64 __jiffies_to_st(unsigned long j)
+ {
+ return processed_system_time + ((j - jiffies) * NS_PER_TICK);
+ }
+
+ /*
+ * This function works out when the next timer function has to be
+ * executed (by looking at the timer list) and sets the Xen one-shot
+ * domain timer to the appropriate value. This is typically called in
+ * cpu_idle() before the domain blocks.
+ *
+ * The function returns a non-0 value on error conditions.
+ *
+ * It must be called with interrupts disabled.
+ */
+ int set_timeout_timer(void)
+ {
+ u64 alarm = 0;
+ int ret = 0;
++#ifdef CONFIG_SMP
++ unsigned long seq;
++#endif
+
+ /*
+ * This is safe against long blocking (since calculations are
+ * not based on TSC deltas). It is also safe against warped
+ * system time since suspend-resume is cooperative and we
- * would first get locked out. It is safe against normal
- * updates of jiffies since interrupts are off.
++ * would first get locked out.
+ */
++#ifdef CONFIG_SMP
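++ /* CPU 0 drives jiffies and the timer wheel; other CPUs need only
++  * wake once per tick for their local accounting. */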
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ if (smp_processor_id())
++ alarm = __jiffies_to_st(jiffies + 1);
++ else
++ alarm = __jiffies_to_st(next_timer_interrupt());
++ } while (read_seqretry(&xtime_lock, seq));
++#else
+ alarm = __jiffies_to_st(next_timer_interrupt());
++#endif
+
+ /* Failure is pretty bad, but we'd best soldier on. */
+ if ( HYPERVISOR_set_timer_op(alarm) != 0 )
+ ret = -1;
+
+ return ret;
+ }
+
+ void time_suspend(void)
+ {
+ /* nothing */
+ }
+
+ /* No locking required. We are the only CPU running, and interrupts are off. */
+ void time_resume(void)
+ {
+ if (timer_tsc_init.init(NULL) != 0)
+ BUG();
+
+ /* Get timebases for new environment. */
+ __get_time_values_from_xen();
+
+ /* Reset our own concept of passage of system time. */
+ processed_system_time = shadow_system_time;
+
+ /* Accept a warp in UTC (wall-clock) time. */
+ last_seen_tv.tv_sec = 0;
+
+ /* Make sure we resync UTC time with Xen on next timer interrupt. */
+ last_update_from_xen = 0;
+ }
+
++#ifdef CONFIG_SMP
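++/* Emergency print straight to the Xen console I/O hypercall, bypassing printk. */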
++#define xxprint(msg) HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg)
++
++static irqreturn_t local_timer_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ s64 delta;
++ int cpu = smp_processor_id();
++
++ do {
++ __get_time_values_from_xen();
++
++ delta = (s64)(shadow_system_time +
++ ((s64)cur_timer->get_offset() *
++ (s64)NSEC_PER_USEC) -
++ per_cpu(processed_system_time, cpu));
++ }
++ while (!TIME_VALUES_UP_TO_DATE);
++
++ if (unlikely(delta < 0)) {
++ printk("Timer ISR/%d: Time went backwards: %lld %lld %lld %lld\n",
++ cpu, delta, shadow_system_time,
++ ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
++ per_cpu(processed_system_time, cpu));
++ return IRQ_HANDLED;
++ }
++
++ /* Process elapsed jiffies since last call. */
++ while (delta >= NS_PER_TICK) {
++ delta -= NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += NS_PER_TICK;
++ if (regs)
++ update_process_times(user_mode(regs));
++#if 0
++ if (regs)
++ profile_tick(CPU_PROFILING, regs);
++#endif
++ }
++
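++ /* This handler is bound only on secondary CPUs; CPU 0 keeps using
++  * the global timer_interrupt() above. */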
++ if (smp_processor_id() == 0) {
++ xxprint("bug bug\n");
++ BUG();
++ }
++
++ return IRQ_HANDLED;
++}
++
++static struct irqaction local_irq_timer = {
++ local_timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "ltimer",
++ NULL, NULL
++};
++
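++/* Set up this CPU's system-time base and bind its VIRQ_TIMER event
++ * channel to the local timer handler above. */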
++void local_setup_timer(void)
++{
++ int seq, time_irq;
++ int cpu = smp_processor_id();
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ per_cpu(processed_system_time, cpu) = shadow_system_time;
++ } while (read_seqretry(&xtime_lock, seq));
++
++ time_irq = bind_virq_to_irq(VIRQ_TIMER);
++ (void)setup_irq(time_irq, &local_irq_timer);
++}
++#endif
++
+ /*
+ * /proc/sys/xen: This really belongs in another file. It can stay here for
+ * now however.
+ */
+ static ctl_table xen_subtable[] = {
+ {1, "independent_wallclock", &independent_wallclock,
+ sizeof(independent_wallclock), 0644, NULL, proc_dointvec},
+ {0}
+ };
+ static ctl_table xen_table[] = {
+ {123, "xen", NULL, 0, 0555, xen_subtable},
+ {0}
+ };
+ static int __init xen_sysctl_init(void)
+ {
+ (void)register_sysctl_table(xen_table, 0);
+ return 0;
+ }
+ __initcall(xen_sysctl_init);
--- /dev/null
- /*
- * A trap in kernel mode can be ignored. It'll be the fast XOR or
- * copying libraries, which will correctly save/restore state and
- * reset the TS bit in CR0.
- */
- if ((regs.xcs & 2) == 0)
- return;
-
- clts(); /* Allow maths ops (or we recurse) */
+ /*
+ * linux/arch/i386/traps.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+ /*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'asm.s'.
+ */
+ #include <linux/config.h>
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+ #include <linux/timer.h>
+ #include <linux/mm.h>
+ #include <linux/init.h>
+ #include <linux/delay.h>
+ #include <linux/spinlock.h>
+ #include <linux/interrupt.h>
+ #include <linux/highmem.h>
+ #include <linux/kallsyms.h>
+ #include <linux/ptrace.h>
+ #include <linux/utsname.h>
+ #include <linux/kprobes.h>
+
+ #ifdef CONFIG_EISA
+ #include <linux/ioport.h>
+ #include <linux/eisa.h>
+ #endif
+
+ #ifdef CONFIG_MCA
+ #include <linux/mca.h>
+ #endif
+
+ #include <asm/processor.h>
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+ #include <asm/atomic.h>
+ #include <asm/debugreg.h>
+ #include <asm/desc.h>
+ #include <asm/i387.h>
+ #include <asm/nmi.h>
+
+ #include <asm/smp.h>
+ #include <asm/arch_hooks.h>
+ #include <asm/kdebug.h>
+
+ #include <linux/irq.h>
+ #include <linux/module.h>
+
+ #include "mach_traps.h"
+
+ asmlinkage int system_call(void);
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq = 0;
+
+ /*
+ * The IDT has to be page-aligned to simplify the Pentium
+ * F0 0F bug workaround.. We have a special link segment
+ * for this.
+ */
+ struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
+
+ asmlinkage void divide_error(void);
+ asmlinkage void debug(void);
+ asmlinkage void nmi(void);
+ asmlinkage void int3(void);
+ asmlinkage void overflow(void);
+ asmlinkage void bounds(void);
+ asmlinkage void invalid_op(void);
+ asmlinkage void device_not_available(void);
+ asmlinkage void coprocessor_segment_overrun(void);
+ asmlinkage void invalid_TSS(void);
+ asmlinkage void segment_not_present(void);
+ asmlinkage void stack_segment(void);
+ asmlinkage void general_protection(void);
+ asmlinkage void page_fault(void);
+ asmlinkage void coprocessor_error(void);
+ asmlinkage void simd_coprocessor_error(void);
+ asmlinkage void alignment_check(void);
+ asmlinkage void fixup_4gb_segment(void);
+ asmlinkage void machine_check(void);
+
+ static int kstack_depth_to_print = 24;
+ struct notifier_block *i386die_chain;
+ static DEFINE_SPINLOCK(die_notifier_lock);
+
+ int register_die_notifier(struct notifier_block *nb)
+ {
+ int err = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&die_notifier_lock, flags);
+ err = notifier_chain_register(&i386die_chain, nb);
+ spin_unlock_irqrestore(&die_notifier_lock, flags);
+ return err;
+ }
+
+ static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
+ {
+ return p > (void *)tinfo &&
+ p < (void *)tinfo + THREAD_SIZE - 3;
+ }
+
+ static inline unsigned long print_context_stack(struct thread_info *tinfo,
+ unsigned long *stack, unsigned long ebp)
+ {
+ unsigned long addr;
+
+ #ifdef CONFIG_FRAME_POINTER
+ while (valid_stack_ptr(tinfo, (void *)ebp)) {
+ addr = *(unsigned long *)(ebp + 4);
+ printk(" [<%08lx>] ", addr);
+ print_symbol("%s", addr);
+ printk("\n");
+ ebp = *(unsigned long *)ebp;
+ }
+ #else
+ while (valid_stack_ptr(tinfo, stack)) {
+ addr = *stack++;
+ if (__kernel_text_address(addr)) {
+ printk(" [<%08lx>]", addr);
+ print_symbol(" %s", addr);
+ printk("\n");
+ }
+ }
+ #endif
+ return ebp;
+ }
+
+ void show_trace(struct task_struct *task, unsigned long * stack)
+ {
+ unsigned long ebp;
+
+ if (!task)
+ task = current;
+
+ if (task == current) {
+ /* Grab ebp right from our regs */
+ asm ("movl %%ebp, %0" : "=r" (ebp) : );
+ } else {
+ /* ebp is the last reg pushed by switch_to */
+ ebp = *(unsigned long *) task->thread.esp;
+ }
+
+ while (1) {
+ struct thread_info *context;
+ context = (struct thread_info *)
+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
+ ebp = print_context_stack(context, stack, ebp);
+ stack = (unsigned long*)context->previous_esp;
+ if (!stack)
+ break;
+ printk(" =======================\n");
+ }
+ }
+
+ void show_stack(struct task_struct *task, unsigned long *esp)
+ {
+ unsigned long *stack;
+ int i;
+
+ if (esp == NULL) {
+ if (task)
+ esp = (unsigned long*)task->thread.esp;
+ else
+ esp = (unsigned long *)&esp;
+ }
+
+ stack = esp;
+ for(i = 0; i < kstack_depth_to_print; i++) {
+ if (kstack_end(stack))
+ break;
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("%08lx ", *stack++);
+ }
+ printk("\nCall Trace:\n");
+ show_trace(task, esp);
+ }
+
+ /*
+ * The architecture-independent dump_stack generator
+ */
+ void dump_stack(void)
+ {
+ unsigned long stack;
+
+ show_trace(current, &stack);
+ }
+
+ EXPORT_SYMBOL(dump_stack);
+
+ void show_registers(struct pt_regs *regs)
+ {
+ int i;
+ int in_kernel = 1;
+ unsigned long esp;
+ unsigned short ss;
+
+ esp = (unsigned long) (&regs->esp);
+ ss = __KERNEL_DS;
+ if (regs->xcs & 2) {
+ in_kernel = 0;
+ esp = regs->esp;
+ ss = regs->xss & 0xffff;
+ }
+ print_modules();
+ printk("CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\nEFLAGS: %08lx"
+ " (%s) \n",
+ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
+ print_tainted(), regs->eflags, system_utsname.release);
+ print_symbol("EIP is at %s\n", regs->eip);
+ printk("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
+ regs->eax, regs->ebx, regs->ecx, regs->edx);
+ printk("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
+ regs->esi, regs->edi, regs->ebp, esp);
+ printk("ds: %04x es: %04x ss: %04x\n",
+ regs->xds & 0xffff, regs->xes & 0xffff, ss);
+ printk("Process %s (pid: %d, threadinfo=%p task=%p)",
+ current->comm, current->pid, current_thread_info(), current);
+ /*
+ * When in-kernel, we also print out the stack and code at the
+ * time of the fault..
+ */
+ if (in_kernel) {
+ u8 *eip;
+
+ printk("\nStack: ");
+ show_stack(NULL, (unsigned long*)esp);
+
+ printk("Code: ");
+
+ eip = (u8 *)regs->eip - 43;
+ for (i = 0; i < 64; i++, eip++) {
+ unsigned char c;
+
+ if (eip < (u8 *)PAGE_OFFSET || __get_user(c, eip)) {
+ printk(" Bad EIP value.");
+ break;
+ }
+ if (eip == (u8 *)regs->eip)
+ printk("<%02x> ", c);
+ else
+ printk("%02x ", c);
+ }
+ }
+ printk("\n");
+ }
+
+ static void handle_BUG(struct pt_regs *regs)
+ {
+ unsigned short ud2;
+ unsigned short line;
+ char *file;
+ char c;
+ unsigned long eip;
+
+ if (regs->xcs & 2)
+ goto no_bug; /* Not in kernel */
+
+ eip = regs->eip;
+
+ if (eip < PAGE_OFFSET)
+ goto no_bug;
+ if (__get_user(ud2, (unsigned short *)eip))
+ goto no_bug;
+ if (ud2 != 0x0b0f)
+ goto no_bug;
+ if (__get_user(line, (unsigned short *)(eip + 2)))
+ goto bug;
+ if (__get_user(file, (char **)(eip + 4)) ||
+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
+ file = "<bad filename>";
+
+ printk("------------[ cut here ]------------\n");
+ printk(KERN_ALERT "kernel BUG at %s:%d!\n", file, line);
+
+ no_bug:
+ return;
+
+ /* Here we know it was a BUG but file-n-line is unavailable */
+ bug:
+ printk("Kernel BUG\n");
+ }
+
+ void die(const char * str, struct pt_regs * regs, long err)
+ {
+ static struct {
+ spinlock_t lock;
+ u32 lock_owner;
+ int lock_owner_depth;
+ } die = {
+ .lock = SPIN_LOCK_UNLOCKED,
+ .lock_owner = -1,
+ .lock_owner_depth = 0
+ };
+ static int die_counter;
+
+ if (die.lock_owner != _smp_processor_id()) {
+ console_verbose();
+ spin_lock_irq(&die.lock);
+ die.lock_owner = smp_processor_id();
+ die.lock_owner_depth = 0;
+ bust_spinlocks(1);
+ }
+
+ if (++die.lock_owner_depth < 3) {
+ int nl = 0;
+ handle_BUG(regs);
+ printk(KERN_ALERT "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+ #ifdef CONFIG_PREEMPT
+ printk("PREEMPT ");
+ nl = 1;
+ #endif
+ #ifdef CONFIG_SMP
+ printk("SMP ");
+ nl = 1;
+ #endif
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ printk("DEBUG_PAGEALLOC");
+ nl = 1;
+ #endif
+ if (nl)
+ printk("\n");
+ notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
+ show_registers(regs);
+ } else
+ printk(KERN_ERR "Recursive die() failure, output suppressed\n");
+
+ bust_spinlocks(0);
+ die.lock_owner = -1;
+ spin_unlock_irq(&die.lock);
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(5 * HZ);
+ panic("Fatal exception");
+ }
+ do_exit(SIGSEGV);
+ }
+
+ static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+ {
+ if (!(regs->eflags & VM_MASK) && !(2 & regs->xcs))
+ die(str, regs, err);
+ }
+
+ static void do_trap(int trapnr, int signr, char *str, int vm86,
+ struct pt_regs * regs, long error_code, siginfo_t *info)
+ {
+ if (regs->eflags & VM_MASK) {
+ if (vm86)
+ goto vm86_trap;
+ goto trap_signal;
+ }
+
+ if (!(regs->xcs & 2))
+ goto kernel_trap;
+
+ trap_signal: {
+ struct task_struct *tsk = current;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = trapnr;
+ if (info)
+ force_sig_info(signr, info, tsk);
+ else
+ force_sig(signr, tsk);
+ return;
+ }
+
+ kernel_trap: {
+ if (!fixup_exception(regs))
+ die(str, regs, error_code);
+ return;
+ }
+
+ vm86_trap: {
+ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
+ if (ret) goto trap_signal;
+ return;
+ }
+ }
+
+ #define DO_ERROR(trapnr, signr, str, name) \
+ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
+ }
+
+ #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)siaddr; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
+ }
+
+ #define DO_VM86_ERROR(trapnr, signr, str, name) \
+ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
+ }
+
+ #define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
+ fastcall void do_##name(struct pt_regs * regs, long error_code) \
+ { \
+ siginfo_t info; \
+ info.si_signo = signr; \
+ info.si_errno = 0; \
+ info.si_code = sicode; \
+ info.si_addr = (void __user *)siaddr; \
+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
+ == NOTIFY_STOP) \
+ return; \
+ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
+ }
+
+ DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
+ #ifndef CONFIG_KPROBES
+ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
+ #endif
+ DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
+ DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
+ DO_ERROR_INFO( 6, SIGILL, "invalid operand", invalid_op, ILL_ILLOPN, regs->eip)
+ DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available)
+ DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
+ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
+ DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+ DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+ DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+ #ifdef CONFIG_X86_MCE
+ DO_ERROR(18, SIGBUS, "machine check", machine_check)
+ #endif
+
+ fastcall void do_general_protection(struct pt_regs * regs, long error_code)
+ {
+ /*
+ * If we trapped on an LDT access then ensure that the default_ldt is
+ * loaded, if nothing else. We load default_ldt lazily because LDT
+ * switching costs time and many applications don't need it.
+ */
+ if (unlikely((error_code & 6) == 4)) {
+ unsigned long ldt;
+ __asm__ __volatile__ ("sldt %0" : "=r" (ldt));
+ if (ldt == 0) {
+ mmu_update_t u;
+ u.ptr = MMU_EXTENDED_COMMAND;
+ u.ptr |= (unsigned long)&default_ldt[0];
+ u.val = MMUEXT_SET_LDT | (5 << MMUEXT_CMD_SHIFT);
+ if (unlikely(HYPERVISOR_mmu_update(&u, 1, NULL) < 0)) {
+ show_trace(NULL, (unsigned long *)&u);
+ panic("Failed to install default LDT");
+ }
+ return;
+ }
+ }
+
+ if (regs->eflags & VM_MASK)
+ goto gp_in_vm86;
+
+ if (!(regs->xcs & 2))
+ goto gp_in_kernel;
+
+ current->thread.error_code = error_code;
+ current->thread.trap_no = 13;
+ force_sig(SIGSEGV, current);
+ return;
+
+ gp_in_vm86:
+ local_irq_enable();
+ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
+ return;
+
+ gp_in_kernel:
+ if (!fixup_exception(regs)) {
+ if (notify_die(DIE_GPF, "general protection fault", regs,
+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
+ return;
+ die("general protection fault", regs, error_code);
+ }
+ }
+
+ static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+ {
+ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
+ printk("You probably have a hardware problem with your RAM chips\n");
+
+ /* Clear and disable the memory parity error line. */
+ clear_mem_error(reason);
+ }
+
+ static void io_check_error(unsigned char reason, struct pt_regs * regs)
+ {
+ unsigned long i;
+
+ printk("NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & 0xf) | 8;
+ outb(reason, 0x61);
+ i = 2000;
+ while (--i) udelay(1000);
+ reason &= ~8;
+ outb(reason, 0x61);
+ }
+
+ static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
+ {
+ #ifdef CONFIG_MCA
+ /* Might actually be able to figure out what the guilty party
+ * is. */
+ if( MCA_bus ) {
+ mca_handle_nmi();
+ return;
+ }
+ #endif
+ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+ reason, smp_processor_id());
+ printk("Dazed and confused, but trying to continue\n");
+ printk("Do you have a strange power saving mode enabled?\n");
+ }
+
+ static DEFINE_SPINLOCK(nmi_print_lock);
+
+ void die_nmi (struct pt_regs *regs, const char *msg)
+ {
+ spin_lock(&nmi_print_lock);
+ /*
+ * We are in trouble anyway, let's at least try
+ * to get a message out.
+ */
+ bust_spinlocks(1);
+ printk(msg);
+ printk(" on CPU%d, eip %08lx, registers:\n",
+ smp_processor_id(), regs->eip);
+ show_registers(regs);
+ printk("console shuts up ...\n");
+ console_silent();
+ spin_unlock(&nmi_print_lock);
+ bust_spinlocks(0);
+ do_exit(SIGSEGV);
+ }
+
+ static void default_do_nmi(struct pt_regs * regs)
+ {
+ unsigned char reason = 0;
+
+ /* Only the BSP gets external NMIs from the system. */
+ if (!smp_processor_id())
+ reason = get_nmi_reason();
+
+ if (!(reason & 0xc0)) {
+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT)
+ == NOTIFY_STOP)
+ return;
+ #ifdef CONFIG_X86_LOCAL_APIC
+ /*
+ * Ok, so this is none of the documented NMI sources,
+ * so it must be the NMI watchdog.
+ */
+ if (nmi_watchdog) {
+ nmi_watchdog_tick(regs);
+ return;
+ }
+ #endif
+ unknown_nmi_error(reason, regs);
+ return;
+ }
+ if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP)
+ return;
+ if (reason & 0x80)
+ mem_parity_error(reason, regs);
+ if (reason & 0x40)
+ io_check_error(reason, regs);
+ /*
+ * Reassert NMI in case it became active meanwhile
+ * as it's edge-triggered.
+ */
+ reassert_nmi();
+ }
+
+ static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
+ {
+ return 0;
+ }
+
+ static nmi_callback_t nmi_callback = dummy_nmi_callback;
+
+ fastcall void do_nmi(struct pt_regs * regs, long error_code)
+ {
+ int cpu;
+
+ nmi_enter();
+
+ cpu = smp_processor_id();
+ ++nmi_count(cpu);
+
+ if (!nmi_callback(regs, cpu))
+ default_do_nmi(regs);
+
+ nmi_exit();
+ }
+
+ void set_nmi_callback(nmi_callback_t callback)
+ {
+ nmi_callback = callback;
+ }
+
+ void unset_nmi_callback(void)
+ {
+ nmi_callback = dummy_nmi_callback;
+ }
+
+ #ifdef CONFIG_KPROBES
+ fastcall int do_int3(struct pt_regs *regs, long error_code)
+ {
+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+ == NOTIFY_STOP)
+ return 1;
+ /* This is an interrupt gate, because kprobes wants interrupts
+ disabled. Normal trap handlers don't. */
+ restore_interrupts(regs);
+ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
+ return 0;
+ }
+ #endif
+
+ /*
+ * Our handling of the processor debug registers is non-trivial.
+ * We do not clear them on entry and exit from the kernel. Therefore
+ * it is possible to get a watchpoint trap here from inside the kernel.
+ * However, the code in ./ptrace.c has ensured that the user can
+ * only set watchpoints on userspace addresses. Therefore the in-kernel
+ * watchpoint trap can only occur in code which is reading/writing
+ * from user space. Such code must not hold kernel locks (since it
+ * can equally take a page fault), therefore it is safe to call
+ * force_sig_info even though that claims and releases locks.
+ *
+ * Code in ./signal.c ensures that the debug control register
+ * is restored before we deliver any signal, and therefore that
+ * user code runs with the correct debug control register even though
+ * we clear it here.
+ *
+ * Being careful here means that we don't have to be as careful in a
+ * lot of more complicated places (task switching can be a bit lazy
+ * about restoring all the debug state, and ptrace doesn't have to
+ * find every occurrence of the TF bit that could be saved away even
+ * by user code)
+ */
+ fastcall void do_debug(struct pt_regs * regs, long error_code)
+ {
+ unsigned int condition;
+ struct task_struct *tsk = current;
+
+ condition = HYPERVISOR_get_debugreg(6);
+
+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+ #if 0
+ /* It's safe to allow irq's after DR6 has been saved */
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_enable();
+ #endif
+
+ /* Mask out spurious debug traps due to lazy DR7 setting */
+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
+ if (!tsk->thread.debugreg[7])
+ goto clear_dr7;
+ }
+
+ if (regs->eflags & VM_MASK)
+ goto debug_vm86;
+
+ /* Save debug status register where ptrace can see it */
+ tsk->thread.debugreg[6] = condition;
+
+ /*
+ * Single-stepping through TF: make sure we ignore any events in
+ * kernel space (but re-enable TF when returning to user mode).
+ * And if the event was due to a debugger (PT_DTRACE), clear the
+ * TF flag so that register information is correct.
+ */
+ if (condition & DR_STEP) {
+ /*
+ * We already checked v86 mode above, so we can
+ * check for kernel mode by just checking the CPL
+ * of CS.
+ */
+ if ((regs->xcs & 2) == 0)
+ goto clear_TF_reenable;
+
+ if (likely(tsk->ptrace & PT_DTRACE)) {
+ tsk->ptrace &= ~PT_DTRACE;
+ regs->eflags &= ~TF_MASK;
+ }
+ }
+
+ /* Ok, finally something we can handle */
+ send_sigtrap(tsk, regs, error_code);
+
+ /* Disable additional traps. They'll be re-enabled when
+ * the signal is delivered.
+ */
+ clear_dr7:
+ HYPERVISOR_set_debugreg(7, 0);
+ return;
+
+ debug_vm86:
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
+ return;
+
+ clear_TF_reenable:
+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
+ regs->eflags &= ~TF_MASK;
+ return;
+ }
+
+ /*
+ * Note that we play around with the 'TS' bit in an attempt to get
+ * the correct behaviour even in the presence of the asynchronous
+ * IRQ13 behaviour
+ */
+ void math_error(void __user *eip)
+ {
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short cwd, swd;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 16;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = eip;
+ /*
+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
+ * status. 0x3f is the exception bits in these regs, 0x200 is the
+ * C1 reg you need in case of a stack fault, 0x040 is the stack
+ * fault bit. We should only be taking one exception at a time,
+ * so if this combination doesn't produce any single exception,
+ * then we have a bad program that isn't synchronizing its FPU usage
+ * and it will suffer the consequences since we won't be able to
+ * fully reproduce the context of the exception
+ */
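+ /*
+ * Worked example with hypothetical register values: cwd = 0x037b leaves
+ * only the zero-divide exception unmasked, so an swd of 0x0004 (ZE set)
+ * gives ((~cwd) & swd & 0x3f) | (swd & 0x240) == 0x004 and we report
+ * FPE_FLTDIV below.
+ */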
+ cwd = get_fpu_cwd(task);
+ swd = get_fpu_swd(task);
+ switch (((~cwd) & swd & 0x3f) | (swd & 0x240)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ case 0x041: /* Stack Fault */
+ case 0x241: /* Stack Fault | Direction */
+ info.si_code = FPE_FLTINV;
+ /* Should we clear the SF or let user space do it ???? */
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+ }
+
+ fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
+ {
+ ignore_fpu_irq = 1;
+ math_error((void __user *)regs->eip);
+ }
+
+ void simd_math_error(void __user *eip)
+ {
+ struct task_struct * task;
+ siginfo_t info;
+ unsigned short mxcsr;
+
+ /*
+ * Save the info for the exception handler and clear the error.
+ */
+ task = current;
+ save_init_fpu(task);
+ task->thread.trap_no = 19;
+ task->thread.error_code = 0;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = __SI_FAULT;
+ info.si_addr = eip;
+ /*
+ * The SIMD FPU exceptions are handled a little differently, as there
+ * is only a single status/control register. Thus, to determine which
+ * unmasked exception was caught we must mask the exception mask bits
+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+ */
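+ /*
+ * Worked example with a hypothetical MXCSR of 0x1d84: the ZM mask bit
+ * (0x200) is clear and the ZE flag (0x04) is set, so
+ * ~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f) == 0x004 -> FPE_FLTDIV.
+ */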
+ mxcsr = get_fpu_mxcsr(task);
+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
+ case 0x000:
+ default:
+ break;
+ case 0x001: /* Invalid Op */
+ info.si_code = FPE_FLTINV;
+ break;
+ case 0x002: /* Denormalize */
+ case 0x010: /* Underflow */
+ info.si_code = FPE_FLTUND;
+ break;
+ case 0x004: /* Zero Divide */
+ info.si_code = FPE_FLTDIV;
+ break;
+ case 0x008: /* Overflow */
+ info.si_code = FPE_FLTOVF;
+ break;
+ case 0x020: /* Precision */
+ info.si_code = FPE_FLTRES;
+ break;
+ }
+ force_sig_info(SIGFPE, &info, task);
+ }
+
+ fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
+ long error_code)
+ {
+ if (cpu_has_xmm) {
+ /* Handle SIMD FPU exceptions on PIII+ processors. */
+ ignore_fpu_irq = 1;
+ simd_math_error((void __user *)regs->eip);
+ } else {
+ /*
+ * Handle strange cache flush from user space exception
+ * in all other cases. This is undocumented behaviour.
+ */
+ if (regs->eflags & VM_MASK) {
+ handle_vm86_fault((struct kernel_vm86_regs *)regs,
+ error_code);
+ return;
+ }
+ die_if_kernel("cache flush denied", regs, error_code);
+ current->thread.trap_no = 19;
+ current->thread.error_code = error_code;
+ force_sig(SIGSEGV, current);
+ }
+ }
+
+ /*
+ * 'math_state_restore()' saves the current math information in the
+ * old math state array, and gets the new ones from the current task
+ *
+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
+ * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled (in this case,
+ * local interrupts are disabled at the call-site in entry.S).
+ */
+ asmlinkage void math_state_restore(struct pt_regs regs)
+ {
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = thread->task;
+
++ /* NB. 'clts' is done for us by Xen during virtual trap. */
+ if (!tsk_used_math(tsk))
+ init_fpu(tsk);
+ restore_fpu(tsk);
+ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
+ }
+
+ #ifndef CONFIG_MATH_EMULATION
+
+ asmlinkage void math_emulate(long arg)
+ {
+ printk("math-emulation not enabled and no coprocessor found.\n");
+ printk("killing %s.\n",current->comm);
+ force_sig(SIGFPE,current);
+ schedule();
+ }
+
+ #endif /* CONFIG_MATH_EMULATION */
+
+ #ifdef CONFIG_X86_F00F_BUG
+ void __init trap_init_f00f_bug(void)
+ {
+ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
+
+ /*
+ * Update the IDT descriptor and reload the IDT so that
+ * it uses the read-only mapped virtual address.
+ */
+ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
+ __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+ }
+ #endif
+
+
+ /* NB. All these are "trap gates" (i.e. events_mask isn't cleared). */
+ static trap_info_t trap_table[] = {
+ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
+ { 1, 0, __KERNEL_CS, (unsigned long)debug },
+ { 3, 3, __KERNEL_CS, (unsigned long)int3 },
+ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
+ { 5, 3, __KERNEL_CS, (unsigned long)bounds },
+ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
+ { 7, 0, __KERNEL_CS, (unsigned long)device_not_available },
+ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
+ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
+ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
+ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
+ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
+ { 14, 0, __KERNEL_CS, (unsigned long)page_fault },
+ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
+ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
+ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
+ #ifdef CONFIG_X86_MCE
+ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
+ #endif
+ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
+ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
+ { 0, 0, 0, 0 }
+ };
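+ /*
+ * In each entry above the second field is the privilege level from which
+ * the trap may be raised: 0 restricts it to the kernel, while 3 (int3,
+ * overflow, bounds and the syscall vector) lets user space invoke it
+ * directly.
+ */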
+
+ void __init trap_init(void)
+ {
+ HYPERVISOR_set_trap_table(trap_table);
+ HYPERVISOR_set_fast_trap(SYSCALL_VECTOR);
+
+ /*
+ * default LDT is a single-entry callgate to lcall7 for iBCS
+ * and a callgate to lcall27 for Solaris/x86 binaries
+ */
+ make_lowmem_page_readonly(&default_ldt[0]);
+ xen_flush_page_update_queue();
+
+ /*
+ * Should be a barrier for any external CPU state.
+ */
+ cpu_init();
+ }
++
++int smp_trap_init(trap_info_t *trap_ctxt)
++{
++ trap_info_t *t = trap_table;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++ return SYSCALL_VECTOR;
++}
--- /dev/null
-pgd_t *cur_pgd; /* XXXsmp */
+ /*
+ * linux/arch/i386/mm/fault.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+ #include <linux/signal.h>
+ #include <linux/sched.h>
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/ptrace.h>
+ #include <linux/mman.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/init.h>
+ #include <linux/tty.h>
+ #include <linux/vt_kern.h> /* For unblank_screen() */
+ #include <linux/highmem.h>
+ #include <linux/module.h>
++#include <linux/percpu.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+ #include <asm/desc.h>
+ #include <asm/kdebug.h>
+
+ extern void die(const char *,struct pt_regs *,long);
+
- page = ((unsigned long *) cur_pgd)[address >> 22];
++DEFINE_PER_CPU(pgd_t *, cur_pgd);
+
+ /*
+ * Unlock any spinlocks which will prevent us from getting the
+ * message out
+ */
+ void bust_spinlocks(int yes)
+ {
+ int loglevel_save = console_loglevel;
+
+ if (yes) {
+ oops_in_progress = 1;
+ return;
+ }
+ #ifdef CONFIG_VT
+ unblank_screen();
+ #endif
+ oops_in_progress = 0;
+ /*
+ * OK, the message is on the console. Now we call printk()
+ * without oops_in_progress set so that printk will give klogd
+ * a poke. Hold onto your hats...
+ */
+ console_loglevel = 15; /* NMI oopser may have shut the console up */
+ printk(" ");
+ console_loglevel = loglevel_save;
+ }
+
+ /*
+ * Return EIP plus the CS segment base. The segment limit is also
+ * adjusted, clamped to the kernel/user address space (whichever is
+ * appropriate), and returned in *eip_limit.
+ *
+ * The segment is checked, because it might have been changed by another
+ * task between the original faulting instruction and here.
+ *
+ * If CS is no longer a valid code segment, or if EIP is beyond the
+ * limit, or if it is a kernel address when CS is not a kernel segment,
+ * then the returned value will be greater than *eip_limit.
+ *
+ * This is slow, but is very rarely executed.
+ */
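+ /*
+ * For illustration: in vm86 mode the linear address is simply
+ * eip + (seg << 4), e.g. CS 0xA000 with EIP 0x0123 yields 0xA0123;
+ * for protected-mode segments the descriptor base is decoded below.
+ */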
+ static inline unsigned long get_segment_eip(struct pt_regs *regs,
+ unsigned long *eip_limit)
+ {
+ unsigned long eip = regs->eip;
+ unsigned seg = regs->xcs & 0xffff;
+ u32 seg_ar, seg_limit, base, *desc;
+
+ /* The standard kernel/user address space limit. */
+ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
+
+ /* Unlikely, but must come before segment checks. */
+ if (unlikely((regs->eflags & VM_MASK) != 0))
+ return eip + (seg << 4);
+
+ /* By far the most common cases. */
+ if (likely(seg == __USER_CS || seg == __KERNEL_CS))
+ return eip;
+
+ /* Check the segment exists, is within the current LDT/GDT size,
+ that kernel/user (ring 0..3) has the appropriate privilege,
+ that it's a code segment, and get the limit. */
+ __asm__ ("larl %3,%0; lsll %3,%1"
+ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
+ if ((~seg_ar & 0x9800) || eip > seg_limit) {
+ *eip_limit = 0;
+ return 1; /* So that returned eip > *eip_limit. */
+ }
+
+ /* Get the GDT/LDT descriptor base.
+ When you look for races in this code remember that
+ LDT and other horrors are only used in user space. */
+ if (seg & (1<<2)) {
+ /* Must lock the LDT while reading it. */
+ down(&current->mm->context.sem);
+ desc = current->mm->context.ldt;
+ desc = (void *)desc + (seg & ~7);
+ } else {
+ /* Must disable preemption while reading the GDT. */
+ desc = (u32 *)get_cpu_gdt_table(get_cpu());
+ desc = (void *)desc + (seg & ~7);
+ }
+
+ /* Decode the code segment base from the descriptor */
+ base = get_desc_base((unsigned long *)desc);
+
+ if (seg & (1<<2)) {
+ up(&current->mm->context.sem);
+ } else
+ put_cpu();
+
+ /* Adjust EIP and segment limit, and clamp at the kernel limit.
+ It's legitimate for segments to wrap at 0xffffffff. */
+ seg_limit += base;
+ if (seg_limit < *eip_limit && seg_limit >= base)
+ *eip_limit = seg_limit;
+ return eip + base;
+ }
+
+ /*
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+ * Check that here and ignore it.
+ */
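+ /*
+ * The decoder below skips instruction prefixes and then looks for the
+ * two-byte prefetch opcodes, e.g. 3DNow! prefetch/prefetchw (0x0F 0x0D)
+ * or the SSE prefetchnta/t0/t1/t2 group (0x0F 0x18).
+ */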
+ static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
+ {
+ unsigned long limit;
+ unsigned long instr = get_segment_eip (regs, &limit);
+ int scan_more = 1;
+ int prefetch = 0;
+ int i;
+
+ for (i = 0; scan_more && i < 15; i++) {
+ unsigned char opcode;
+ unsigned char instr_hi;
+ unsigned char instr_lo;
+
+ if (instr > limit)
+ break;
+ if (__get_user(opcode, (unsigned char *) instr))
+ break;
+
+ instr_hi = opcode & 0xf0;
+ instr_lo = opcode & 0x0f;
+ instr++;
+
+ switch (instr_hi) {
+ case 0x20:
+ case 0x30:
+ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
+ scan_more = ((instr_lo & 7) == 0x6);
+ break;
+
+ case 0x60:
+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
+ scan_more = (instr_lo & 0xC) == 0x4;
+ break;
+ case 0xF0:
+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
+ scan_more = !instr_lo || (instr_lo>>1) == 1;
+ break;
+ case 0x00:
+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
+ scan_more = 0;
+ if (instr > limit)
+ break;
+ if (__get_user(opcode, (unsigned char *) instr))
+ break;
+ prefetch = (instr_lo == 0xF) &&
+ (opcode == 0x0D || opcode == 0x18);
+ break;
+ default:
+ scan_more = 0;
+ break;
+ }
+ }
+ return prefetch;
+ }
+
+ static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
+ {
+ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 >= 6)) {
+ /* Catch an obscure case of prefetch inside an NX page. */
+ if (nx_enabled && (error_code & 16))
+ return 0;
+ return __is_prefetch(regs, addr);
+ }
+ return 0;
+ }
+
+ fastcall void do_invalid_op(struct pt_regs *, unsigned long);
+
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ *
+ * error_code:
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ */
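+ /*
+ * For example, error_code == 6 (bits 1 and 2 set, bit 0 clear) denotes a
+ * user-mode write to a not-present page. Note that under Xen the
+ * user/kernel bit is reconstructed from regs->xcs (and the eflags VM bit)
+ * at the top of the handler rather than taken from the hardware code.
+ */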
+ fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
+ {
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
+ unsigned long page;
+ int write;
+ siginfo_t info;
+
+ /* Set the "privileged fault" bit to something sane. */
+ error_code &= 3;
+ error_code |= (regs->xcs & 2) << 1;
+ if (regs->eflags & X86_EFLAGS_VM)
+ error_code |= 4;
+
+ if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+ #if 0
+ /* It's safe to allow irq's after cr2 has been saved */
+ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+ local_irq_enable();
+ #endif
+
+ tsk = current;
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * This verifies that the fault happens in kernel space
+ * (error_code & 4) == 0, and that the fault was not a
+ * protection error (error_code & 1) == 0.
+ */
+ if (unlikely(address >= TASK_SIZE)) {
+ if (!(error_code & 5))
+ goto vmalloc_fault;
+ /*
+ * Don't take the mm semaphore here. If we fixup a prefetch
+ * fault we could otherwise deadlock.
+ */
+ goto bad_area_nosemaphore;
+ }
+
+ mm = tsk->mm;
+
+ /*
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto bad_area_nosemaphore;
+
+ /* When running in the kernel we expect faults to occur only to
+ * addresses in user space. All other faults represent errors in the
+ * kernel and should generate an OOPS. Unfortunately, in the case of an
+ * erroneous fault occurring in a code path which already holds mmap_sem
+ * we will deadlock attempting to validate the fault against the
+ * address space. Luckily the kernel only validly references user
+ * space from well defined areas of code, which are listed in the
+ * exceptions table.
+ *
+ * As the vast majority of faults will be valid we will only perform
+ * the source reference check when there is a possibility of a deadlock.
+ * Attempt to lock the address space, if we cannot we then validate the
+ * source. If this is invalid we can skip the address space check,
+ * thus avoiding the deadlock.
+ */
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if ((error_code & 4) == 0 &&
+ !search_exception_tables(regs->eip))
+ goto bad_area_nosemaphore;
+ down_read(&mm->mmap_sem);
+ }
+
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (error_code & 4) {
+ /*
+ * accessing the stack below %esp is always a bug.
+ * The "+ 32" is there due to some instructions (like
+ * pusha) doing post-decrement on the stack and that
+ * doesn't show up until later..
+ */
+ if (address + 32 < regs->esp)
+ goto bad_area;
+ }
+ if (expand_stack(vma, address))
+ goto bad_area;
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+ good_area:
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+ default: /* 3: write, present */
+ #ifdef TEST_VERIFY_AREA
+ if (regs->cs == KERNEL_CS)
+ printk("WP fault at %08lx\n", regs->eip);
+ #endif
+ /* fall through */
+ case 2: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+ case 1: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ switch (handle_mm_fault(mm, vma, address, write)) {
+ case VM_FAULT_MINOR:
+ tsk->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ tsk->maj_flt++;
+ break;
+ case VM_FAULT_SIGBUS:
+ goto do_sigbus;
+ case VM_FAULT_OOM:
+ goto out_of_memory;
+ default:
+ BUG();
+ }
+
+ /*
+ * Did it hit the DOS screen memory VA from vm86 mode?
+ */
+ if (regs->eflags & VM_MASK) {
+ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
+ if (bit < 32)
+ tsk->thread.screen_bitmap |= 1 << bit;
+ }
+ up_read(&mm->mmap_sem);
+ return;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+ bad_area:
+ up_read(&mm->mmap_sem);
+
+ bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
+ /*
+ * Valid to do another page fault here because this one came
+ * from user space.
+ */
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+ #ifdef CONFIG_X86_F00F_BUG
+ /*
+ * Pentium F0 0F C7 C8 bug workaround.
+ */
+ if (boot_cpu_data.f00f_bug) {
+ unsigned long nr;
+
+ nr = (address - idt_descr.address) >> 3;
+
+ if (nr == 6) {
+ do_invalid_op(regs, 0);
+ return;
+ }
+ }
+ #endif
+
+ no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * Valid to do another page fault here, because if this fault
+ * had been triggered by is_prefetch fixup_exception would have
+ * handled it.
+ */
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+
+ bust_spinlocks(1);
+
+ #ifdef CONFIG_X86_PAE
+ if (error_code & 16) {
+ pte_t *pte = lookup_address(address);
+
+ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+ printk(KERN_CRIT "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n", current->uid);
+ }
+ #endif
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+ printk(KERN_ALERT " printing eip:\n");
+ printk("%08lx\n", regs->eip);
- pgd = index + cur_pgd;
++ page = ((unsigned long *) per_cpu(cur_pgd, smp_processor_id()))
++ [address >> 22];
+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
+ /*
+ * We must not directly access the pte in the highpte
+ * case, the page table might be allocated in highmem.
+ * And let's rather not kmap-atomic the pte, just in case
+ * it's allocated already.
+ */
+ #ifndef CONFIG_HIGHPTE
+ if (page & 1) {
+ page &= PAGE_MASK;
+ address &= 0x003ff000;
+ page = machine_to_phys(page);
+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
+ machine_to_phys(page));
+ }
+ #endif
+ show_trace(NULL, (unsigned long *)&regs[1]);
+ die("Oops", regs, error_code);
+ bust_spinlocks(0);
+ do_exit(SIGKILL);
+
+ /*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+ out_of_memory:
+ up_read(&mm->mmap_sem);
+ if (tsk->pid == 1) {
+ yield();
+ down_read(&mm->mmap_sem);
+ goto survive;
+ }
+ printk("VM: killing process %s\n", tsk->comm);
+ if (error_code & 4)
+ do_exit(SIGKILL);
+ goto no_context;
+
+ do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
+
+ /* User space => ok to do another page fault */
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ tsk->thread.cr2 = address;
+ tsk->thread.error_code = error_code;
+ tsk->thread.trap_no = 14;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+ return;
+
+ vmalloc_fault:
+ {
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ int index = pgd_index(address);
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+
++ pgd = index + per_cpu(cur_pgd, smp_processor_id());
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k))
+ goto no_context;
+
+ /*
+ * set_pgd(pgd, *pgd_k); here would be useless on PAE
+ * and redundant with the set_pmd() on non-PAE. As would
+ * set_pud.
+ */
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto no_context;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto no_context;
+ set_pmd(pmd, *pmd_k);
+ xen_flush_page_update_queue(); /* flush PMD update */
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+ return;
+ }
+ }
--- /dev/null
-static mmu_update_t update_queue[QUEUE_SIZE];
-unsigned int mmu_update_queue_idx = 0;
-#define idx mmu_update_queue_idx
+ /******************************************************************************
+ * mm/hypervisor.c
+ *
+ * Update page tables via the hypervisor.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #include <linux/config.h>
+ #include <linux/sched.h>
+ #include <linux/mm.h>
+ #include <linux/vmalloc.h>
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm-xen/hypervisor.h>
+ #include <asm-xen/multicall.h>
+ #include <asm-xen/balloon.h>
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++#include <linux/percpu.h>
++#endif
+
+ /*
+ * This suffices to protect us if we ever move to SMP domains.
+ * Further, it protects us against interrupts. At the very least, this is
+ * required for the network driver which flushes the update queue before
+ * pushing new receive buffers.
+ */
+ static spinlock_t update_lock = SPIN_LOCK_UNLOCKED;
+
+ /* Linux 2.6 isn't using the traditional batched interface. */
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ #define QUEUE_SIZE 2048
+ #define pte_offset_kernel pte_offset
+ #define pmd_val_ma(v) (v).pmd;
+ #define pud_t pgd_t
+ #define pud_offset(d, va) d
+ #else
++#ifdef CONFIG_SMP
++#define QUEUE_SIZE 1
++#else
+ #define QUEUE_SIZE 128
+ #define pmd_val_ma(v) (v).pud.pgd.pgd;
+ #endif
++#endif
+
- idx = 0;
++DEFINE_PER_CPU(mmu_update_t, update_queue[QUEUE_SIZE]);
++DEFINE_PER_CPU(unsigned int, mmu_update_queue_idx);
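++/*
++ * Note on the per-CPU idiom above: per_cpu(update_queue[i], cpu) names
++ * element i of this CPU's private queue, and mmu_update_queue_idx is the
++ * per-CPU fill index. In outline, the queue_* helpers below append an
++ * entry and increment_index() hands the whole batch to
++ * HYPERVISOR_mmu_update() once QUEUE_SIZE entries have accumulated or an
++ * explicit flush is requested.
++ */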
+
+ /*
+ * MULTICALL_flush_page_update_queue:
+ * This is a version of the flush which queues as part of a multicall.
+ */
+ void MULTICALL_flush_page_update_queue(void)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ unsigned int _idx;
+ spin_lock_irqsave(&update_lock, flags);
++ idx = per_cpu(mmu_update_queue_idx, cpu);
+ if ( (_idx = idx) != 0 )
+ {
- (unsigned long)update_queue,
++ per_cpu(mmu_update_queue_idx, cpu) = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
+ queue_multicall3(__HYPERVISOR_mmu_update,
- unsigned int _idx = idx;
- idx = 0;
++ (unsigned long)&per_cpu(update_queue[0], cpu),
+ (unsigned long)_idx,
+ (unsigned long)NULL);
+ }
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ static inline void __flush_page_update_queue(void)
+ {
- if ( unlikely(HYPERVISOR_mmu_update(update_queue, _idx, NULL) < 0) )
++ int cpu = smp_processor_id();
++ unsigned int _idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(mmu_update_queue_idx, cpu) = 0;
+ wmb(); /* Make sure index is cleared first to avoid double updates. */
- if ( idx != 0 ) __flush_page_update_queue();
++ if ( unlikely(HYPERVISOR_mmu_update(&per_cpu(update_queue[0], cpu), _idx, NULL) < 0) )
+ {
+ printk(KERN_ALERT "Failed to execute MMU updates.\n");
+ BUG();
+ }
+ }
+
+ void _flush_page_update_queue(void)
+ {
++ int cpu = smp_processor_id();
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- idx++;
- if ( unlikely(idx == QUEUE_SIZE) ) __flush_page_update_queue();
++ if ( per_cpu(mmu_update_queue_idx, cpu) != 0 ) __flush_page_update_queue();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ static inline void increment_index(void)
+ {
- idx++;
++ int cpu = smp_processor_id();
++ per_cpu(mmu_update_queue_idx, cpu)++;
++ if ( unlikely(per_cpu(mmu_update_queue_idx, cpu) == QUEUE_SIZE) ) __flush_page_update_queue();
+ }
+
+ static inline void increment_index_and_flush(void)
+ {
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = val;
++ int cpu = smp_processor_id();
++ per_cpu(mmu_update_queue_idx, cpu)++;
+ __flush_page_update_queue();
+ }
+
+ void queue_l1_entry_update(pte_t *ptr, unsigned long val)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = pmd_val_ma(val);
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_l2_entry_update(pmd_t *ptr, pmd_t val)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_NEW_BASEPTR;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_pt_switch(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_TLB_FLUSH;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_tlb_flush(void)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].ptr |= ptr & PAGE_MASK;
- update_queue[idx].val = MMUEXT_INVLPG;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_invlpg(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_pgd_pin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_pgd_unpin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_pte_pin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_pte_unpin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
- update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_set_ldt(unsigned long ptr, unsigned long len)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- update_queue[idx].val = pfn;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void queue_machphys_update(unsigned long mfn, unsigned long pfn)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = val;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++ per_cpu(update_queue[idx], cpu).val = pfn;
+ increment_index();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ /* queue and flush versions of the above */
+ void xen_l1_entry_update(pte_t *ptr, unsigned long val)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = virt_to_machine(ptr);
- update_queue[idx].val = pmd_val_ma(val);
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).val = val;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_NEW_BASEPTR;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).val = pmd_val_ma(val);
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_pt_switch(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_TLB_FLUSH;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_NEW_BASEPTR;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_tlb_flush(void)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND;
- update_queue[idx].ptr |= ptr & PAGE_MASK;
- update_queue[idx].val = MMUEXT_INVLPG;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_TLB_FLUSH;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_invlpg(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L2_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).ptr |= ptr & PAGE_MASK;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_INVLPG;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_pgd_pin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L2_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_pgd_unpin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_PIN_L1_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_pte_pin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = phys_to_machine(ptr);
- update_queue[idx].ptr |= MMU_EXTENDED_COMMAND;
- update_queue[idx].val = MMUEXT_UNPIN_TABLE;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_PIN_L1_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_pte_unpin(unsigned long ptr)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = MMU_EXTENDED_COMMAND | ptr;
- update_queue[idx].val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = phys_to_machine(ptr);
++ per_cpu(update_queue[idx], cpu).ptr |= MMU_EXTENDED_COMMAND;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_UNPIN_TABLE;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_set_ldt(unsigned long ptr, unsigned long len)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
- update_queue[idx].ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
- update_queue[idx].val = pfn;
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = MMU_EXTENDED_COMMAND | ptr;
++ per_cpu(update_queue[idx], cpu).val = MMUEXT_SET_LDT | (len << MMUEXT_CMD_SHIFT);
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ void xen_machphys_update(unsigned long mfn, unsigned long pfn)
+ {
++ int cpu = smp_processor_id();
++ int idx;
+ unsigned long flags;
+ spin_lock_irqsave(&update_lock, flags);
++ idx = per_cpu(mmu_update_queue_idx, cpu);
++ per_cpu(update_queue[idx], cpu).ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++ per_cpu(update_queue[idx], cpu).val = pfn;
+ increment_index_and_flush();
+ spin_unlock_irqrestore(&update_lock, flags);
+ }
+
+ #ifdef CONFIG_XEN_PHYSDEV_ACCESS
+
+ unsigned long allocate_empty_lowmem_region(unsigned long pages)
+ {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ unsigned long *pfn_array;
+ unsigned long vstart;
+ unsigned long i;
+ unsigned int order = get_order(pages*PAGE_SIZE);
+
+ vstart = __get_free_pages(GFP_KERNEL, order);
+ if ( vstart == 0 )
+ return 0UL;
+
+ scrub_pages(vstart, 1 << order);
+
+ pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+ if ( pfn_array == NULL )
+ BUG();
+
+ for ( i = 0; i < (1<<order); i++ )
+ {
+ pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
+ pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+ pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+ pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+ pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
+ queue_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
+ }
+
+ /* Flush updates through and flush the TLB. */
+ xen_tlb_flush();
+
+ balloon_put_pages(pfn_array, 1 << order);
+
+ vfree(pfn_array);
+
+ return vstart;
+ }
+
+ #endif /* CONFIG_XEN_PHYSDEV_ACCESS */
--- /dev/null
-obj-y := ctrl_if.o evtchn.o fixup.o reboot.o xen_proc.o gnttab.o skbuff.o devmem.o
+ #
+ # Makefile for the linux kernel.
+ #
+
+ XENARCH := $(subst ",,$(CONFIG_XENARCH))
+
+ CPPFLAGS_vmlinux.lds += -U$(XENARCH)
+
+ $(obj)/vmlinux.lds.S:
+ @ln -fsn $(srctree)/arch/$(XENARCH)/kernel/vmlinux.lds.S $@
+
+ extra-y += vmlinux.lds
+
++obj-y := ctrl_if.o evtchn.o fixup.o reboot.o xen_proc.o \
++ gnttab.o skbuff.o devmem.o smp.o
--- /dev/null
-static CONTROL_RING_IDX ctrl_if_tx_resp_cons;
-static CONTROL_RING_IDX ctrl_if_rx_req_cons;
+ /******************************************************************************
+ * ctrl_if.c
+ *
+ * Management functions for special interface to the domain controller.
+ *
+ * Copyright (c) 2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #include <linux/config.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+ #include <linux/irq.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <asm-xen/ctrl_if.h>
+ #include <asm-xen/evtchn.h>
+
+ #if 0
+ #define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+ #else
+ #define DPRINTK(_f, _a...) ((void)0)
+ #endif
+
++/*
++ * Extra ring macros to sync a consumer index up to the public producer index.
++ * Generally UNSAFE, but we use it for recovery and shutdown in some cases.
++ */
++#define RING_DROP_PENDING_REQUESTS(_r) \
++ do { \
++ (_r)->req_cons = (_r)->sring->req_prod; \
++ } while (0)
++#define RING_DROP_PENDING_RESPONSES(_r) \
++ do { \
++ (_r)->rsp_cons = (_r)->sring->rsp_prod; \
++ } while (0)
++
+ /*
+ * Only used by initial domain which must create its own control-interface
+ * event channel. This value is picked up by the user-space domain controller
+ * via an ioctl.
+ */
+ int initdom_ctrlif_domcontroller_port = -1;
+
+ static int ctrl_if_evtchn;
+ static int ctrl_if_irq;
+ static spinlock_t ctrl_if_lock;
+
+ static struct irqaction ctrl_if_irq_action;
+
-#define TX_FULL(_c) \
- (((_c)->tx_req_prod - ctrl_if_tx_resp_cons) == CONTROL_RING_SIZE)
++static ctrl_front_ring_t ctrl_if_tx_ring;
++static ctrl_back_ring_t ctrl_if_rx_ring;
+
+ /* Incoming message requests. */
+ /* Primary message type -> message handler. */
+ static ctrl_msg_handler_t ctrl_if_rxmsg_handler[256];
+ /* Primary message type -> callback in process context? */
+ static unsigned long ctrl_if_rxmsg_blocking_context[256/sizeof(unsigned long)];
+ /* Is it late enough during bootstrap to use schedule_task()? */
+ static int safe_to_schedule_task;
+ /* Queue up messages to be handled in process context. */
+ static ctrl_msg_t ctrl_if_rxmsg_deferred[CONTROL_RING_SIZE];
+ static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_prod;
+ static CONTROL_RING_IDX ctrl_if_rxmsg_deferred_cons;
+
+ /* Incoming message responses: message identifier -> message handler/id. */
+ static struct {
+ ctrl_msg_handler_t fn;
+ unsigned long id;
+ } ctrl_if_txmsg_id_mapping[CONTROL_RING_SIZE];
+
+ /* For received messages that must be deferred to process context. */
+ static void __ctrl_if_rxmsg_deferred(void *unused);
+ static DECLARE_WORK(ctrl_if_rxmsg_deferred_work,
+ __ctrl_if_rxmsg_deferred,
+ NULL);
+
+ /* Deferred callbacks for people waiting for space in the transmit ring. */
+ static DECLARE_TASK_QUEUE(ctrl_if_tx_tq);
+
+ static DECLARE_WAIT_QUEUE_HEAD(ctrl_if_tx_wait);
+ static void __ctrl_if_tx_tasklet(unsigned long data);
+ static DECLARE_TASKLET(ctrl_if_tx_tasklet, __ctrl_if_tx_tasklet, 0);
+
+ static void __ctrl_if_rx_tasklet(unsigned long data);
+ static DECLARE_TASKLET(ctrl_if_rx_tasklet, __ctrl_if_rx_tasklet, 0);
+
+ #define get_ctrl_if() ((control_if_t *)((char *)HYPERVISOR_shared_info + 2048))
- control_if_t *ctrl_if = get_ctrl_if();
- ctrl_msg_t *msg;
- int was_full = TX_FULL(ctrl_if);
- CONTROL_RING_IDX rp;
+
+ static void ctrl_if_notify_controller(void)
+ {
+ notify_via_evtchn(ctrl_if_evtchn);
+ }
+
+ static void ctrl_if_rxmsg_default_handler(ctrl_msg_t *msg, unsigned long id)
+ {
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+ }
+
+ static void __ctrl_if_tx_tasklet(unsigned long data)
+ {
- rp = ctrl_if->tx_resp_prod;
++ ctrl_msg_t *msg;
++ int was_full = RING_FULL(&ctrl_if_tx_ring);
++ RING_IDX i, rp;
+
- while ( ctrl_if_tx_resp_cons != rp )
++ i = ctrl_if_tx_ring.rsp_cons;
++ rp = ctrl_if_tx_ring.sring->rsp_prod;
+ rmb(); /* Ensure we see all requests up to 'rp'. */
+
- msg = &ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if_tx_resp_cons)];
-
- DPRINTK("Rx-Rsp %u/%u :: %d/%d\n",
- ctrl_if_tx_resp_cons,
- ctrl_if->tx_resp_prod,
++ for ( ; i != rp; i++ )
+ {
-
- /*
- * Step over the message in the ring /after/ finishing reading it. As
- * soon as the index is updated then the message may get blown away.
- */
- smp_mb();
- ctrl_if_tx_resp_cons++;
++ msg = RING_GET_RESPONSE(&ctrl_if_tx_ring, i);
++
++ DPRINTK("Rx-Rsp %u/%u :: %d/%d\n", i-1,
++ ctrl_if_tx_ring.sring->rsp_prod,
+ msg->type, msg->subtype);
+
+ /* Execute the callback handler, if one was specified. */
+ if ( msg->id != 0xFF )
+ {
+ (*ctrl_if_txmsg_id_mapping[msg->id].fn)(
+ msg, ctrl_if_txmsg_id_mapping[msg->id].id);
+ smp_mb(); /* Execute, /then/ free. */
+ ctrl_if_txmsg_id_mapping[msg->id].fn = NULL;
+ }
- if ( was_full && !TX_FULL(ctrl_if) )
+ }
+
- control_if_t *ctrl_if = get_ctrl_if();
++ /*
++ * Step over messages in the ring /after/ finishing reading them. As soon
++ * as the index is updated then the message may get blown away.
++ */
++ smp_mb();
++ ctrl_if_tx_ring.rsp_cons = i;
++
++ if ( was_full && !RING_FULL(&ctrl_if_tx_ring) )
+ {
+ wake_up(&ctrl_if_tx_wait);
+ run_task_queue(&ctrl_if_tx_tq);
+ }
+ }
+
+ static void __ctrl_if_rxmsg_deferred(void *unused)
+ {
+ ctrl_msg_t *msg;
+ CONTROL_RING_IDX dp;
+
+ dp = ctrl_if_rxmsg_deferred_prod;
+ rmb(); /* Ensure we see all deferred requests up to 'dp'. */
+
+ while ( ctrl_if_rxmsg_deferred_cons != dp )
+ {
+ msg = &ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(
+ ctrl_if_rxmsg_deferred_cons++)];
+ (*ctrl_if_rxmsg_handler[msg->type])(msg, 0);
+ }
+ }
+
+ static void __ctrl_if_rx_tasklet(unsigned long data)
+ {
- CONTROL_RING_IDX rp, dp;
+ ctrl_msg_t msg, *pmsg;
- rp = ctrl_if->rx_req_prod;
++ CONTROL_RING_IDX dp;
++ RING_IDX rp, i;
+
++ i = ctrl_if_rx_ring.req_cons;
++ rp = ctrl_if_rx_ring.sring->req_prod;
+ dp = ctrl_if_rxmsg_deferred_prod;
-
- while ( ctrl_if_rx_req_cons != rp )
+ rmb(); /* Ensure we see all requests up to 'rp'. */
- pmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if_rx_req_cons++)];
++
++ for ( ; i != rp; i++)
+ {
- DPRINTK("Rx-Req %u/%u :: %d/%d\n",
- ctrl_if_rx_req_cons-1,
- ctrl_if->rx_req_prod,
++ pmsg = RING_GET_REQUEST(&ctrl_if_rx_ring, i);
+ memcpy(&msg, pmsg, offsetof(ctrl_msg_t, msg));
+
- control_if_t *ctrl_if = get_ctrl_if();
-
- if ( ctrl_if_tx_resp_cons != ctrl_if->tx_resp_prod )
++ DPRINTK("Rx-Req %u/%u :: %d/%d\n", i-1,
++ ctrl_if_rx_ring.sring->req_prod,
+ msg.type, msg.subtype);
+
++ if ( msg.length > sizeof(msg.msg) )
++ msg.length = sizeof(msg.msg);
++
+ if ( msg.length != 0 )
+ memcpy(msg.msg, pmsg->msg, msg.length);
+
+ if ( test_bit(msg.type,
+ (unsigned long *)&ctrl_if_rxmsg_blocking_context) )
+ memcpy(&ctrl_if_rxmsg_deferred[MASK_CONTROL_IDX(dp++)],
+ &msg, offsetof(ctrl_msg_t, msg) + msg.length);
+ else
+ (*ctrl_if_rxmsg_handler[msg.type])(&msg, 0);
+ }
+
++ ctrl_if_rx_ring.req_cons = i;
++
+ if ( dp != ctrl_if_rxmsg_deferred_prod )
+ {
+ wmb();
+ ctrl_if_rxmsg_deferred_prod = dp;
+ schedule_work(&ctrl_if_rxmsg_deferred_work);
+ }
+ }
+
+ static irqreturn_t ctrl_if_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+ {
- if ( ctrl_if_rx_req_cons != ctrl_if->rx_req_prod )
++ if ( RING_HAS_UNCONSUMED_RESPONSES(&ctrl_if_tx_ring) )
+ tasklet_schedule(&ctrl_if_tx_tasklet);
+
- control_if_t *ctrl_if = get_ctrl_if();
++ if ( RING_HAS_UNCONSUMED_REQUESTS(&ctrl_if_rx_ring) )
+ tasklet_schedule(&ctrl_if_rx_tasklet);
+
+ return IRQ_HANDLED;
+ }
+
+ int
+ ctrl_if_send_message_noblock(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id)
+ {
- if ( TX_FULL(ctrl_if) )
+ unsigned long flags;
++ ctrl_msg_t *dmsg;
+ int i;
+
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
- ctrl_if->tx_req_prod,
- ctrl_if_tx_resp_cons,
++ if ( RING_FULL(&ctrl_if_tx_ring) )
+ {
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+ return -EAGAIN;
+ }
+
+ msg->id = 0xFF;
+ if ( hnd != NULL )
+ {
+ for ( i = 0; ctrl_if_txmsg_id_mapping[i].fn != NULL; i++ )
+ continue;
+ ctrl_if_txmsg_id_mapping[i].fn = hnd;
+ ctrl_if_txmsg_id_mapping[i].id = id;
+ msg->id = i;
+ }
+
+ DPRINTK("Tx-Req %u/%u :: %d/%d\n",
- memcpy(&ctrl_if->tx_ring[MASK_CONTROL_IDX(ctrl_if->tx_req_prod)],
- msg, sizeof(*msg));
- wmb(); /* Write the message before letting the controller peek at it. */
- ctrl_if->tx_req_prod++;
++ ctrl_if_tx_ring.req_prod_pvt,
++ ctrl_if_tx_ring.rsp_cons,
+ msg->type, msg->subtype);
+
- control_if_t *ctrl_if = get_ctrl_if();
-
++ dmsg = RING_GET_REQUEST(&ctrl_if_tx_ring,
++ ctrl_if_tx_ring.req_prod_pvt);
++ memcpy(dmsg, msg, sizeof(*msg));
++ ctrl_if_tx_ring.req_prod_pvt++;
++ RING_PUSH_REQUESTS(&ctrl_if_tx_ring);
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ ctrl_if_notify_controller();
+
+ return 0;
+ }
+
+ int
+ ctrl_if_send_message_block(
+ ctrl_msg_t *msg,
+ ctrl_msg_handler_t hnd,
+ unsigned long id,
+ long wait_state)
+ {
+ DECLARE_WAITQUEUE(wait, current);
+ int rc;
+
+ /* Fast path. */
+ if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+ return rc;
+
+ add_wait_queue(&ctrl_if_tx_wait, &wait);
+
+ for ( ; ; )
+ {
+ set_current_state(wait_state);
+
+ if ( (rc = ctrl_if_send_message_noblock(msg, hnd, id)) != -EAGAIN )
+ break;
+
+ rc = -ERESTARTSYS;
+ if ( signal_pending(current) && (wait_state == TASK_INTERRUPTIBLE) )
+ break;
+
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ctrl_if_tx_wait, &wait);
+
+ return rc;
+ }
+
+ /* Allow a response-callback handler to find context of a blocked requester. */
+ struct rsp_wait {
+ ctrl_msg_t *msg; /* Buffer for the response message. */
+ struct task_struct *task; /* The task that is blocked on the response. */
+ int done; /* Indicate to 'task' that response is rcv'ed. */
+ };
+
+ static void __ctrl_if_get_response(ctrl_msg_t *msg, unsigned long id)
+ {
+ struct rsp_wait *wait = (struct rsp_wait *)id;
+ struct task_struct *task = wait->task;
+
+ memcpy(wait->msg, msg, sizeof(*msg));
+ wmb();
+ wait->done = 1;
+
+ wake_up_process(task);
+ }
+
+ int
+ ctrl_if_send_message_and_get_response(
+ ctrl_msg_t *msg,
+ ctrl_msg_t *rmsg,
+ long wait_state)
+ {
+ struct rsp_wait wait;
+ int rc;
+
+ wait.msg = rmsg;
+ wait.done = 0;
+ wait.task = current;
+
+ if ( (rc = ctrl_if_send_message_block(msg, __ctrl_if_get_response,
+ (unsigned long)&wait,
+ wait_state)) != 0 )
+ return rc;
+
+ for ( ; ; )
+ {
+ /* NB. Can't easily support TASK_INTERRUPTIBLE here. */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if ( wait.done )
+ break;
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ return 0;
+ }
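
A hypothetical caller of the synchronous interface above; the zeroed type/subtype values are placeholders rather than real control-message definitions:

    static int example_query(void)
    {
        ctrl_msg_t req, rsp;
        int rc;

        memset(&req, 0, sizeof(req));
        req.type    = 0;            /* e.g. some CMSG_* type */
        req.subtype = 0;
        req.length  = 0;

        rc = ctrl_if_send_message_and_get_response(&req, &rsp,
                                                    TASK_UNINTERRUPTIBLE);
        if ( rc != 0 )
            return rc;

        /* 'rsp' now holds the controller's reply (see __ctrl_if_get_response). */
        return 0;
    }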
+
+ int
+ ctrl_if_enqueue_space_callback(
+ struct tq_struct *task)
+ {
- if ( !TX_FULL(ctrl_if) )
+ /* Fast path. */
- return TX_FULL(ctrl_if);
++ if ( !RING_FULL(&ctrl_if_tx_ring) )
+ return 0;
+
+ (void)queue_task(task, &ctrl_if_tx_tq);
+
+ /*
+ * We may race execution of the task queue, so return re-checked status. If
+ * the task is not executed despite the ring being non-full then we will
+ * certainly return 'not full'.
+ */
+ smp_mb();
- control_if_t *ctrl_if = get_ctrl_if();
++ return RING_FULL(&ctrl_if_tx_ring);
+ }
+
+ void
+ ctrl_if_send_response(
+ ctrl_msg_t *msg)
+ {
- ctrl_if->rx_resp_prod,
+ unsigned long flags;
+ ctrl_msg_t *dmsg;
+
+ /*
+ * NB. The response may be the original request message, modified in-place.
+ * In this situation we may have src==dst, so no copying is required.
+ */
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ DPRINTK("Tx-Rsp %u :: %d/%d\n",
- dmsg = &ctrl_if->rx_ring[MASK_CONTROL_IDX(ctrl_if->rx_resp_prod)];
++ ctrl_if_rx_ring.rsp_prod_pvt,
+ msg->type, msg->subtype);
+
- wmb(); /* Write the message before letting the controller peek at it. */
- ctrl_if->rx_resp_prod++;
++ dmsg = RING_GET_RESPONSE(&ctrl_if_rx_ring,
++ ctrl_if_rx_ring.rsp_prod_pvt);
+ if ( dmsg != msg )
+ memcpy(dmsg, msg, sizeof(*msg));
+
- ctrl_if_tx_resp_cons = ctrl_if->tx_resp_prod;
- ctrl_if_rx_req_cons = ctrl_if->rx_resp_prod;
++ ctrl_if_rx_ring.rsp_prod_pvt++;
++ RING_PUSH_RESPONSES(&ctrl_if_rx_ring);
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ ctrl_if_notify_controller();
+ }
+
+ int
+ ctrl_if_register_receiver(
+ u8 type,
+ ctrl_msg_handler_t hnd,
+ unsigned int flags)
+ {
+ unsigned long _flags;
+ int inuse;
+
+ spin_lock_irqsave(&ctrl_if_lock, _flags);
+
+ inuse = (ctrl_if_rxmsg_handler[type] != ctrl_if_rxmsg_default_handler);
+
+ if ( inuse )
+ {
+ printk(KERN_INFO "Receiver %p already established for control "
+ "messages of type %d.\n", ctrl_if_rxmsg_handler[type], type);
+ }
+ else
+ {
+ ctrl_if_rxmsg_handler[type] = hnd;
+ clear_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+ if ( flags == CALLBACK_IN_BLOCKING_CONTEXT )
+ {
+ set_bit(type, (unsigned long *)&ctrl_if_rxmsg_blocking_context);
+ if ( !safe_to_schedule_task )
+ BUG();
+ }
+ }
+
+ spin_unlock_irqrestore(&ctrl_if_lock, _flags);
+
+ return !inuse;
+ }
+
+ void
+ ctrl_if_unregister_receiver(
+ u8 type,
+ ctrl_msg_handler_t hnd)
+ {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_if_lock, flags);
+
+ if ( ctrl_if_rxmsg_handler[type] != hnd )
+ printk(KERN_INFO "Receiver %p is not registered for control "
+ "messages of type %d.\n", hnd, type);
+ else
+ ctrl_if_rxmsg_handler[type] = ctrl_if_rxmsg_default_handler;
+
+ spin_unlock_irqrestore(&ctrl_if_lock, flags);
+
+ /* Ensure that @hnd will not be executed after this function returns. */
+ tasklet_unlock_wait(&ctrl_if_rx_tasklet);
+ }
+
+ void ctrl_if_suspend(void)
+ {
+ teardown_irq(ctrl_if_irq, &ctrl_if_irq_action);
+ unbind_evtchn_from_irq(ctrl_if_evtchn);
+ }
+
+ void ctrl_if_resume(void)
+ {
+ control_if_t *ctrl_if = get_ctrl_if();
+
+ if ( xen_start_info.flags & SIF_INITDOMAIN )
+ {
+ /*
+ * The initial domain must create its own domain-controller link.
+ * The controller is probably not running at this point, but will
+ * pick up its end of the event channel later (the port is saved in
+ * initdom_ctrlif_domcontroller_port below).
+ */
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_bind_interdomain;
+ op.u.bind_interdomain.dom1 = DOMID_SELF;
+ op.u.bind_interdomain.dom2 = DOMID_SELF;
+ op.u.bind_interdomain.port1 = 0;
+ op.u.bind_interdomain.port2 = 0;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ BUG();
+ xen_start_info.domain_controller_evtchn = op.u.bind_interdomain.port1;
+ initdom_ctrlif_domcontroller_port = op.u.bind_interdomain.port2;
+ }
+
+ /* Sync up with shared indexes. */
- int i;
++ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
++ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
+
+ ctrl_if_evtchn = xen_start_info.domain_controller_evtchn;
+ ctrl_if_irq = bind_evtchn_to_irq(ctrl_if_evtchn);
+
+ memset(&ctrl_if_irq_action, 0, sizeof(ctrl_if_irq_action));
+ ctrl_if_irq_action.handler = ctrl_if_interrupt;
+ ctrl_if_irq_action.name = "ctrl-if";
+ (void)setup_irq(ctrl_if_irq, &ctrl_if_irq_action);
+ }
+
+ void __init ctrl_if_init(void)
+ {
- return (get_ctrl_if()->tx_req_prod == ctrl_if_tx_resp_cons);
++ control_if_t *ctrl_if = get_ctrl_if();
++ int i;
+
+ for ( i = 0; i < 256; i++ )
+ ctrl_if_rxmsg_handler[i] = ctrl_if_rxmsg_default_handler;
+
++ FRONT_RING_ATTACH(&ctrl_if_tx_ring, &ctrl_if->tx_ring);
++ BACK_RING_ATTACH(&ctrl_if_rx_ring, &ctrl_if->rx_ring);
++
+ spin_lock_init(&ctrl_if_lock);
+
+ ctrl_if_resume();
+ }
+
+
+ /* This is called after it is safe to call schedule_task(). */
+ static int __init ctrl_if_late_setup(void)
+ {
+ safe_to_schedule_task = 1;
+ return 0;
+ }
+ __initcall(ctrl_if_late_setup);
+
+
+ /*
+ * !! The following are DANGEROUS FUNCTIONS !!
+ * Use with care [for example, see xencons_force_flush()].
+ */
+
+ int ctrl_if_transmitter_empty(void)
+ {
- ctrl_if_tx_resp_cons = get_ctrl_if()->tx_resp_prod;
++ return (ctrl_if_tx_ring.sring->req_prod == ctrl_if_tx_ring.rsp_cons);
+ }
+
+ void ctrl_if_discard_responses(void)
+ {
++ RING_DROP_PENDING_RESPONSES(&ctrl_if_tx_ring);
+ }
+
+ EXPORT_SYMBOL(ctrl_if_send_message_noblock);
+ EXPORT_SYMBOL(ctrl_if_send_message_block);
+ EXPORT_SYMBOL(ctrl_if_send_message_and_get_response);
+ EXPORT_SYMBOL(ctrl_if_enqueue_space_callback);
+ EXPORT_SYMBOL(ctrl_if_send_response);
+ EXPORT_SYMBOL(ctrl_if_register_receiver);
+ EXPORT_SYMBOL(ctrl_if_unregister_receiver);
--- /dev/null
-static int virq_to_irq[NR_VIRQS];
+ /******************************************************************************
+ * evtchn.c
+ *
+ * Communication via Xen event channels.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #include <linux/config.h>
+ #include <linux/module.h>
+ #include <linux/irq.h>
+ #include <linux/interrupt.h>
+ #include <linux/sched.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/version.h>
+ #include <asm/atomic.h>
+ #include <asm/system.h>
+ #include <asm/ptrace.h>
+ #include <asm/synch_bitops.h>
+ #include <asm-xen/xen-public/event_channel.h>
+ #include <asm-xen/xen-public/physdev.h>
+ #include <asm-xen/ctrl_if.h>
+ #include <asm-xen/hypervisor.h>
++#include <asm-xen/evtchn.h>
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ EXPORT_SYMBOL(force_evtchn_callback);
+ EXPORT_SYMBOL(evtchn_do_upcall);
+ #endif
+
+ /*
+ * This lock protects updates to the following mapping and reference-count
+ * arrays. The lock does not need to be acquired to read the mapping tables.
+ */
+ static spinlock_t irq_mapping_update_lock;
+
+ /* IRQ <-> event-channel mappings. */
+ static int evtchn_to_irq[NR_EVENT_CHANNELS];
+ static int irq_to_evtchn[NR_IRQS];
+
+ /* IRQ <-> VIRQ mapping. */
- s->vcpu_data[0].evtchn_upcall_pending = 0;
-
++DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
++
++/* evtchn <-> IPI mapping. */
++#ifndef NR_IPIS
++#define NR_IPIS 1
++#endif
++DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
+
+ /* Reference counts for bindings to IRQs. */
+ static int irq_bindcount[NR_IRQS];
+
+ /* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
+ static unsigned long pirq_needs_unmask_notify[NR_PIRQS/sizeof(unsigned long)];
+
+ /* Upcall to generic IRQ layer. */
+ #ifdef CONFIG_X86
+ #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+ extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
+ #else
+ extern asmlinkage unsigned int do_IRQ(struct pt_regs *regs);
+ #endif
+ #define do_IRQ(irq, regs) do { \
+ (regs)->orig_eax = (irq); \
+ do_IRQ((regs)); \
+ } while (0)
+ #endif
+
+ #define VALID_EVTCHN(_chn) ((_chn) >= 0)
+
+ /*
+ * Force a proper event-channel callback from Xen after clearing the
+ * callback mask. We do this in a very simple manner, by making a call
+ * down into Xen. The pending flag will be checked by Xen on return.
+ */
+ void force_evtchn_callback(void)
+ {
+ (void)HYPERVISOR_xen_version(0);
+ }
+
+ /* NB. Interrupts are disabled on entry. */
+ asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
+ {
+ unsigned long l1, l2;
+ unsigned int l1i, l2i, port;
+ int irq;
+ shared_info_t *s = HYPERVISOR_shared_info;
++ vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
+
- l1 = xchg(&s->evtchn_pending_sel, 0);
++ vcpu_info->evtchn_upcall_pending = 0;
++
+ /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
- l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i];
- while ( l2 != 0 )
++ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
+ while ( l1 != 0 )
+ {
+ l1i = __ffs(l1);
+ l1 &= ~(1 << l1i);
+
- if ( (irq = virq_to_irq[virq]) == -1 )
++ while ( (l2 = s->evtchn_pending[l1i] & ~s->evtchn_mask[l1i]) != 0 )
+ {
+ l2i = __ffs(l2);
+ l2 &= ~(1 << l2i);
+
+ port = (l1i << 5) + l2i;
+ if ( (irq = evtchn_to_irq[port]) != -1 )
+ do_IRQ(irq, regs);
+ else
+ evtchn_device_upcall(port);
+ }
+ }
+ }
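
The '(l1i << 5) + l2i' arithmetic above assumes 32 event channels per selector word. Small illustrative helpers (not part of the patch) showing the mapping in both directions; e.g. port 70 decodes to l1i == 2, l2i == 6:

    static inline unsigned int port_to_l1i(unsigned int port) { return port >> 5; }
    static inline unsigned int port_to_l2i(unsigned int port) { return port & 31; }
    static inline unsigned int l1l2_to_port(unsigned int l1i, unsigned int l2i)
    {
        return (l1i << 5) + l2i;
    }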
+
+ static int find_unbound_irq(void)
+ {
+ int irq;
+
+ for ( irq = 0; irq < NR_IRQS; irq++ )
+ if ( irq_bindcount[irq] == 0 )
+ break;
+
+ if ( irq == NR_IRQS )
+ panic("No available IRQ to bind to: increase NR_IRQS!\n");
+
+ return irq;
+ }
+
+ int bind_virq_to_irq(int virq)
+ {
+ evtchn_op_t op;
+ int evtchn, irq;
++ int cpu = smp_processor_id();
+
+ spin_lock(&irq_mapping_update_lock);
+
- virq_to_irq[virq] = irq;
++ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
+ {
+ op.cmd = EVTCHNOP_bind_virq;
+ op.u.bind_virq.virq = virq;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to bind virtual IRQ %d\n", virq);
+ evtchn = op.u.bind_virq.port;
+
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+
- int irq = virq_to_irq[virq];
++ per_cpu(virq_to_irq, cpu)[virq] = irq;
+ }
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
+ }
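
Hypothetical use of the per-CPU VIRQ binding above, mirroring how ctrl_if_resume() wires its event channel: bind a VIRQ on the current CPU and install a handler through the normal IRQ machinery (VIRQ_DEBUG and my_virq_action are illustrative):

    static struct irqaction my_virq_action;   /* .handler and .name set by the caller */

    static void example_bind_virq(void)
    {
        int irq = bind_virq_to_irq(VIRQ_DEBUG);
        (void)setup_irq(irq, &my_virq_action);
    }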
+
+ void unbind_virq_from_irq(int virq)
+ {
+ evtchn_op_t op;
- virq_to_irq[virq] = -1;
++ int cpu = smp_processor_id();
++ int irq = per_cpu(virq_to_irq, cpu)[virq];
+ int evtchn = irq_to_evtchn[irq];
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( --irq_bindcount[irq] == 0 )
+ {
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to unbind virtual IRQ %d\n", virq);
+
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
-static irqreturn_t misdirect_interrupt(int irq, void *dev_id,
- struct pt_regs *regs)
-{
- /* nothing */
- return IRQ_HANDLED;
-}
-
-static struct irqaction misdirect_action = {
- misdirect_interrupt,
- SA_INTERRUPT,
- CPU_MASK_NONE,
- "misdirect",
- NULL,
- NULL
-};
-
++ per_cpu(virq_to_irq, cpu)[virq] = -1;
++ }
++
++ spin_unlock(&irq_mapping_update_lock);
++}
++
++int bind_ipi_on_cpu_to_irq(int cpu, int ipi)
++{
++ evtchn_op_t op;
++ int evtchn, irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ( (evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]) == 0 )
++ {
++ op.cmd = EVTCHNOP_bind_ipi;
++ op.u.bind_ipi.ipi_edom = cpu;
++ if ( HYPERVISOR_event_channel_op(&op) != 0 )
++ panic("Failed to bind virtual IPI %d on cpu %d\n", ipi, cpu);
++ evtchn = op.u.bind_ipi.port;
++
++ irq = find_unbound_irq();
++ evtchn_to_irq[evtchn] = irq;
++ irq_to_evtchn[irq] = evtchn;
++
++ per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
++ }
++ else
++ irq = evtchn_to_irq[evtchn];
++
++ irq_bindcount[irq]++;
++
++ spin_unlock(&irq_mapping_update_lock);
++
++ return irq;
++}
++
++void unbind_ipi_on_cpu_from_irq(int cpu, int ipi)
++{
++ evtchn_op_t op;
++ int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi];
++ int irq = evtchn_to_irq[evtchn];
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ( --irq_bindcount[irq] == 0 )
++ {
++ op.cmd = EVTCHNOP_close;
++ op.u.close.dom = DOMID_SELF;
++ op.u.close.port = evtchn;
++ if ( HYPERVISOR_event_channel_op(&op) != 0 )
++ panic("Failed to unbind virtual IPI %d on cpu %d\n", ipi, cpu);
++
++ evtchn_to_irq[evtchn] = -1;
++ irq_to_evtchn[irq] = -1;
++ per_cpu(ipi_to_evtchn, cpu)[ipi] = 0;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+ }
+
+ int bind_evtchn_to_irq(int evtchn)
+ {
+ int irq;
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( (irq = evtchn_to_irq[evtchn]) == -1 )
+ {
+ irq = find_unbound_irq();
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+ }
+
+ irq_bindcount[irq]++;
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ return irq;
+ }
+
+ void unbind_evtchn_from_irq(int evtchn)
+ {
+ int irq = evtchn_to_irq[evtchn];
+
+ spin_lock(&irq_mapping_update_lock);
+
+ if ( --irq_bindcount[irq] == 0 )
+ {
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ }
+
+ spin_unlock(&irq_mapping_update_lock);
+ }
+
+
+ /*
+ * Interface to generic handling in irq.c
+ */
+
+ static unsigned int startup_dynirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !VALID_EVTCHN(evtchn) )
+ return 0;
+ unmask_evtchn(evtchn);
+ return 0;
+ }
+
+ static void shutdown_dynirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ mask_evtchn(evtchn);
+ }
+
+ static void enable_dynirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+
+ unmask_evtchn(evtchn);
+ }
+
+ static void disable_dynirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+
+ mask_evtchn(evtchn);
+ }
+
+ static void ack_dynirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+ }
+
+ static void end_dynirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+ unmask_evtchn(evtchn);
+ }
+
+ static struct hw_interrupt_type dynirq_type = {
+ "Dynamic-irq",
+ startup_dynirq,
+ shutdown_dynirq,
+ enable_dynirq,
+ disable_dynirq,
+ ack_dynirq,
+ end_dynirq,
+ NULL
+ };
+
+ static inline void pirq_unmask_notify(int pirq)
+ {
+ physdev_op_t op;
+ if ( unlikely(test_bit(pirq, &pirq_needs_unmask_notify[0])) )
+ {
+ op.cmd = PHYSDEVOP_IRQ_UNMASK_NOTIFY;
+ (void)HYPERVISOR_physdev_op(&op);
+ }
+ }
+
+ static inline void pirq_query_unmask(int pirq)
+ {
+ physdev_op_t op;
+ op.cmd = PHYSDEVOP_IRQ_STATUS_QUERY;
+ op.u.irq_status_query.irq = pirq;
+ (void)HYPERVISOR_physdev_op(&op);
+ clear_bit(pirq, &pirq_needs_unmask_notify[0]);
+ if ( op.u.irq_status_query.flags & PHYSDEVOP_IRQ_NEEDS_UNMASK_NOTIFY )
+ set_bit(pirq, &pirq_needs_unmask_notify[0]);
+ }
+
+ /*
+ * On startup, if there is no action associated with the IRQ then we are
+ * probing. In this case we should not share with others as it will confuse us.
+ */
+ #define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
+
+ static unsigned int startup_pirq(unsigned int irq)
+ {
+ evtchn_op_t op;
+ int evtchn;
+
+ op.cmd = EVTCHNOP_bind_pirq;
+ op.u.bind_pirq.pirq = irq;
+ /* NB. We are happy to share unless we are probing. */
+ op.u.bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ {
+ if ( !probing_irq(irq) ) /* Some failures are expected when probing. */
+ printk(KERN_INFO "Failed to obtain physical IRQ %d\n", irq);
+ return 0;
+ }
+ evtchn = op.u.bind_pirq.port;
+
+ pirq_query_unmask(irq_to_pirq(irq));
+
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
+
+ return 0;
+ }
+
+ static void shutdown_pirq(unsigned int irq)
+ {
+ evtchn_op_t op;
+ int evtchn = irq_to_evtchn[irq];
+
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+
+ mask_evtchn(evtchn);
+
+ op.cmd = EVTCHNOP_close;
+ op.u.close.dom = DOMID_SELF;
+ op.u.close.port = evtchn;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to unbind physical IRQ %d\n", irq);
+
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ }
+
+ static void enable_pirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
+ }
+
+ static void disable_pirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ mask_evtchn(evtchn);
+ }
+
+ static void ack_pirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ mask_evtchn(evtchn);
+ clear_evtchn(evtchn);
+ }
+
+ static void end_pirq(unsigned int irq)
+ {
+ int evtchn = irq_to_evtchn[irq];
+ if ( !VALID_EVTCHN(evtchn) )
+ return;
+ if ( !(irq_desc[irq].status & IRQ_DISABLED) )
+ {
+ unmask_evtchn(evtchn);
+ pirq_unmask_notify(irq_to_pirq(irq));
+ }
+ }
+
+ static struct hw_interrupt_type pirq_type = {
+ "Phys-irq",
+ startup_pirq,
+ shutdown_pirq,
+ enable_pirq,
+ disable_pirq,
+ ack_pirq,
+ end_pirq,
+ NULL
+ };
+
- if ( (irq = virq_to_irq[virq]) == -1 )
+ void irq_suspend(void)
+ {
+ int pirq, virq, irq, evtchn;
++ int cpu = smp_processor_id(); /* XXX */
+
+ /* Unbind VIRQs from event channels. */
+ for ( virq = 0; virq < NR_VIRQS; virq++ )
+ {
- if ( (irq = virq_to_irq[virq]) == -1 )
++ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
+ continue;
+ evtchn = irq_to_evtchn[irq];
+
+ /* Mark the event channel as unused in our table. */
+ evtchn_to_irq[evtchn] = -1;
+ irq_to_evtchn[irq] = -1;
+ }
+
+ /* Check that no PIRQs are still bound. */
+ for ( pirq = 0; pirq < NR_PIRQS; pirq++ )
+ if ( (evtchn = irq_to_evtchn[pirq_to_irq(pirq)]) != -1 )
+ panic("Suspend attempted while PIRQ %d bound to evtchn %d.\n",
+ pirq, evtchn);
+ }
+
+ void irq_resume(void)
+ {
+ evtchn_op_t op;
+ int virq, irq, evtchn;
++ int cpu = smp_processor_id(); /* XXX */
+
+ for ( evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++ )
+ mask_evtchn(evtchn); /* New event-channel space is not 'live' yet. */
+
+ for ( virq = 0; virq < NR_VIRQS; virq++ )
+ {
- /* No VIRQ -> IRQ mappings. */
- for ( i = 0; i < NR_VIRQS; i++ )
- virq_to_irq[i] = -1;
++ if ( (irq = per_cpu(virq_to_irq, cpu)[virq]) == -1 )
+ continue;
+
+ /* Get a new binding from Xen. */
+ op.cmd = EVTCHNOP_bind_virq;
+ op.u.bind_virq.virq = virq;
+ if ( HYPERVISOR_event_channel_op(&op) != 0 )
+ panic("Failed to bind virtual IRQ %d\n", virq);
+ evtchn = op.u.bind_virq.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_to_evtchn[irq] = evtchn;
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+ }
+ }
+
+ void __init init_IRQ(void)
+ {
+ int i;
++ int cpu;
+
+ irq_ctx_init(0);
+
+ spin_lock_init(&irq_mapping_update_lock);
+
- (void)setup_irq(bind_virq_to_irq(VIRQ_MISDIRECT), &misdirect_action);
-
++ for ( cpu = 0; cpu < NR_CPUS; cpu++ ) {
++ /* No VIRQ -> IRQ mappings. */
++ for ( i = 0; i < NR_VIRQS; i++ )
++ per_cpu(virq_to_irq, cpu)[i] = -1;
++ }
+
+ /* No event-channel -> IRQ mappings. */
+ for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+ {
+ evtchn_to_irq[i] = -1;
+ mask_evtchn(i); /* No event channels are 'live' right now. */
+ }
+
+ /* No IRQ -> event-channel mappings. */
+ for ( i = 0; i < NR_IRQS; i++ )
+ irq_to_evtchn[i] = -1;
+
+ for ( i = 0; i < NR_DYNIRQS; i++ )
+ {
+ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
+ irq_bindcount[dynirq_to_irq(i)] = 0;
+
+ irq_desc[dynirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[dynirq_to_irq(i)].action = 0;
+ irq_desc[dynirq_to_irq(i)].depth = 1;
+ irq_desc[dynirq_to_irq(i)].handler = &dynirq_type;
+ }
+
+ for ( i = 0; i < NR_PIRQS; i++ )
+ {
+ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
+ irq_bindcount[pirq_to_irq(i)] = 1;
+
+ irq_desc[pirq_to_irq(i)].status = IRQ_DISABLED;
+ irq_desc[pirq_to_irq(i)].action = 0;
+ irq_desc[pirq_to_irq(i)].depth = 1;
+ irq_desc[pirq_to_irq(i)].handler = &pirq_type;
+ }
+
+ /* This needs to be done early, but after the IRQ subsystem is alive. */
+ ctrl_if_init();
+ }
--- /dev/null
-gnttab_end_foreign_access(
- grant_ref_t ref, int readonly)
+ /******************************************************************************
+ * gnttab.c
+ *
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ *
++ * Copyright (c) 2005, Christopher Clark
+ * Copyright (c) 2004, K A Fraser
+ */
+
+ #include <linux/config.h>
+ #include <linux/module.h>
+ #include <linux/sched.h>
+ #include <asm/pgtable.h>
+ #include <asm/fixmap.h>
++#include <asm/uaccess.h>
++#include <asm-xen/xen_proc.h>
++#include <asm-xen/linux-public/privcmd.h>
+ #include <asm-xen/gnttab.h>
+
+ #ifndef set_fixmap_ma
+ #define set_fixmap_ma set_fixmap
+ #endif
+
+ #if 1
+ #define ASSERT(_p) \
+ if ( !(_p) ) { printk(KERN_ALERT"Assertion '%s': line %d, file %s\n", \
+ #_p , __LINE__, __FILE__); *(int*)0=0; }
+ #else
+ #define ASSERT(_p) ((void)0)
+ #endif
+
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_grant: " fmt, ##args)
++
++
+ EXPORT_SYMBOL(gnttab_grant_foreign_access);
+ EXPORT_SYMBOL(gnttab_end_foreign_access);
++EXPORT_SYMBOL(gnttab_query_foreign_access);
+ EXPORT_SYMBOL(gnttab_grant_foreign_transfer);
+ EXPORT_SYMBOL(gnttab_end_foreign_transfer);
+
+ #define NR_GRANT_REFS 512
+ static grant_ref_t gnttab_free_list[NR_GRANT_REFS];
+ static grant_ref_t gnttab_free_head;
+
+ static grant_entry_t *shared;
+
++/* /proc/xen/grant */
++static struct proc_dir_entry *grant_pde;
++
++
+ /*
+ * Lock-free grant-entry allocator
+ */
+
+ static inline int
+ get_free_entry(
+ void)
+ {
+ grant_ref_t fh, nfh = gnttab_free_head;
+ do { if ( unlikely((fh = nfh) == NR_GRANT_REFS) ) return -1; }
+ while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh,
+ gnttab_free_list[fh])) != fh) );
+ return fh;
+ }
+
+ static inline void
+ put_free_entry(
+ grant_ref_t ref)
+ {
+ grant_ref_t fh, nfh = gnttab_free_head;
+ do { gnttab_free_list[ref] = fh = nfh; wmb(); }
+ while ( unlikely((nfh = cmpxchg(&gnttab_free_head, fh, ref)) != fh) );
+ }
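
get_free_entry()/put_free_entry() above treat gnttab_free_list as a lock-free stack of indices whose head is claimed and restored with cmpxchg. A user-space analogue of the same pattern, using GCC's __sync_val_compare_and_swap as a stand-in for cmpxchg (illustrative only; assumes the list is initialised as gnttab_init() does, free_list[i] = i + 1 and free_head = 0):

    #define NR_ENTRIES 512
    static int free_list[NR_ENTRIES];
    static int free_head;

    static int get_free(void)
    {
        int fh, nfh = free_head;
        do { if ( (fh = nfh) == NR_ENTRIES ) return -1; }   /* list empty */
        while ( (nfh = __sync_val_compare_and_swap(&free_head, fh,
                                                   free_list[fh])) != fh );
        return fh;
    }

    static void put_free(int ref)
    {
        int fh, nfh = free_head;
        do { free_list[ref] = fh = nfh; }   /* __sync builtin is a full barrier */
        while ( (nfh = __sync_val_compare_and_swap(&free_head, fh, ref)) != fh );
    }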
+
+ /*
+ * Public grant-issuing interface functions
+ */
+
+ int
+ gnttab_grant_foreign_access(
+ domid_t domid, unsigned long frame, int readonly)
+ {
+ int ref;
+
+ if ( unlikely((ref = get_free_entry()) == -1) )
+ return -ENOSPC;
+
+ shared[ref].frame = frame;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
+
+ return ref;
+ }
+
++int
++gnttab_query_foreign_access( grant_ref_t ref )
++{
++ u16 nflags;
++
++ nflags = shared[ref].flags;
++
++ return ( nflags & (GTF_reading|GTF_writing) );
++}
++
+ void
-void __init gnttab_init(void)
++gnttab_end_foreign_access( grant_ref_t ref, int readonly )
+ {
+ u16 flags, nflags;
+
+ nflags = shared[ref].flags;
+ do {
+ if ( (flags = nflags) & (GTF_reading|GTF_writing) )
+ printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+ }
+ while ( (nflags = cmpxchg(&shared[ref].flags, flags, 0)) != flags );
+
+ put_free_entry(ref);
+ }
+
+ int
+ gnttab_grant_foreign_transfer(
+ domid_t domid)
+ {
+ int ref;
+
+ if ( unlikely((ref = get_free_entry()) == -1) )
+ return -ENOSPC;
+
+ shared[ref].frame = 0;
+ shared[ref].domid = domid;
+ wmb();
+ shared[ref].flags = GTF_accept_transfer;
+
+ return ref;
+ }
+
+ unsigned long
+ gnttab_end_foreign_transfer(
+ grant_ref_t ref)
+ {
+ unsigned long frame = 0;
+ u16 flags;
+
+ flags = shared[ref].flags;
+ ASSERT(flags == (GTF_accept_transfer | GTF_transfer_committed));
+
+ /*
+ * If a transfer is committed then wait for the frame address to appear.
+ * Otherwise invalidate the grant entry against future use.
+ */
+ if ( likely(flags != GTF_accept_transfer) ||
+ (cmpxchg(&shared[ref].flags, flags, 0) != GTF_accept_transfer) )
+ while ( unlikely((frame = shared[ref].frame) == 0) )
+ cpu_relax();
+
+ put_free_entry(ref);
+
+ return frame;
+ }
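
A hypothetical grant lifecycle using the public interfaces above: grant read-only access to one of our frames and revoke it once the peer has unmapped it. 'peer' and 'frame' are placeholders supplied by the caller, and a real caller would bound or reschedule the wait rather than spin:

    static void example_grant(domid_t peer, unsigned long frame)
    {
        int ref = gnttab_grant_foreign_access(peer, frame, 1 /* readonly */);
        if ( ref < 0 )
            return;                              /* table full (-ENOSPC) */

        /* ... hand 'ref' to the peer, wait for it to finish ... */

        while ( gnttab_query_foreign_access(ref) )
            cpu_relax();                         /* peer still has it mapped */

        gnttab_end_foreign_access(ref, 1);
    }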
+
++static int grant_ioctl(struct inode *inode, struct file *file,
++ unsigned int cmd, unsigned long data)
++{
++ int ret;
++ privcmd_hypercall_t hypercall;
++
++ /* XXX Need safety checks here if using for anything other
++ * than debugging */
++ return -ENOSYS;
++
++ if ( cmd != IOCTL_PRIVCMD_HYPERCALL )
++ return -ENOSYS;
++
++ if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
++ return -EFAULT;
++
++ if ( hypercall.op != __HYPERVISOR_grant_table_op )
++ return -ENOSYS;
++
++ /* hypercall-invoking asm taken from privcmd.c */
++ __asm__ __volatile__ (
++ "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
++ "movl 4(%%eax),%%ebx ;"
++ "movl 8(%%eax),%%ecx ;"
++ "movl 12(%%eax),%%edx ;"
++ "movl 16(%%eax),%%esi ;"
++ "movl 20(%%eax),%%edi ;"
++ "movl (%%eax),%%eax ;"
++ TRAP_INSTR "; "
++ "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
++ : "=a" (ret) : "0" (&hypercall) : "memory" );
++
++ return ret;
++}
++
++static struct file_operations grant_file_ops = {
++ ioctl: grant_ioctl,
++};
++
++static int grant_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++ unsigned int i;
++ grant_entry_t *gt;
++
++ gt = (grant_entry_t *)shared;
++ len = 0;
++
++ for ( i = 0; i < NR_GRANT_REFS; i++ )
++ if ( gt[i].flags )
++ len += sprintf( page + len,
++ "Grant: ref (0x%x) flags (0x%hx) dom (0x%hx) frame (0x%x)\n",
++ i,
++ gt[i].flags,
++ gt[i].domid,
++ gt[i].frame );
++
++ *eof = 1;
++ return len;
++}
++
++static int grant_write(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ /* TODO: implement this */
++ return -ENOSYS;
++}
++
++static int __init gnttab_init(void)
+ {
+ gnttab_setup_table_t setup;
+ unsigned long frame;
+ int i;
+
+ for ( i = 0; i < NR_GRANT_REFS; i++ )
+ gnttab_free_list[i] = i + 1;
+
+ setup.dom = DOMID_SELF;
+ setup.nr_frames = 1;
+ setup.frame_list = &frame;
+ if ( HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1) != 0 )
+ BUG();
+ if ( setup.status != 0 )
+ BUG();
+
+ set_fixmap_ma(FIX_GNTTAB, frame << PAGE_SHIFT);
+ shared = (grant_entry_t *)fix_to_virt(FIX_GNTTAB);
++
++ /*
++ * /proc/xen/grant : used by libxc to access grant tables
++ */
++ if ( (grant_pde = create_xen_proc_entry("grant", 0600)) == NULL )
++ {
++ WPRINTK("Unable to create grant xen proc entry\n");
++ return -1;
++ }
++
++ grant_file_ops.read = grant_pde->proc_fops->read;
++ grant_file_ops.write = grant_pde->proc_fops->write;
++
++ grant_pde->proc_fops = &grant_file_ops;
++
++ grant_pde->read_proc = &grant_read;
++ grant_pde->write_proc = &grant_write;
++
++ return 0;
+ }
++
++__initcall(gnttab_init);
--- /dev/null
- extern unsigned long *pfn_to_mfn_frame_list;
+
+ #define __KERNEL_SYSCALLS__
+ static int errno;
+ #include <linux/errno.h>
+ #include <linux/version.h>
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/unistd.h>
+ #include <linux/module.h>
+ #include <linux/reboot.h>
+ #include <linux/sysrq.h>
+ #include <asm/irq.h>
+ #include <asm/mmu_context.h>
+ #include <asm-xen/ctrl_if.h>
+ #include <asm-xen/hypervisor.h>
+ #include <asm-xen/xen-public/dom0_ops.h>
+ #include <asm-xen/linux-public/suspend.h>
+ #include <asm-xen/queues.h>
+
+ void machine_restart(char * __unused)
+ {
+ /* We really want to get pending console data out before we die. */
+ extern void xencons_force_flush(void);
+ xencons_force_flush();
+ HYPERVISOR_reboot();
+ }
+
+ void machine_halt(void)
+ {
+ machine_power_off();
+ }
+
+ void machine_power_off(void)
+ {
+ /* We really want to get pending console data out before we die. */
+ extern void xencons_force_flush(void);
+ xencons_force_flush();
+ HYPERVISOR_shutdown();
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ int reboot_thru_bios = 0; /* for dmi_scan.c */
+ EXPORT_SYMBOL(machine_restart);
+ EXPORT_SYMBOL(machine_halt);
+ EXPORT_SYMBOL(machine_power_off);
+ #endif
+
+
+ /******************************************************************************
+ * Stop/pickle callback handling.
+ */
+
+ /* Ignore multiple shutdown requests. */
+ static int shutting_down = -1;
+ static int pending_sysrq = -1;
+
+ static void __do_suspend(void)
+ {
+ int i, j;
+ suspend_record_t *suspend_record;
+
+ /* Hmmm... a cleaner interface to suspend/resume blkdevs would be nice. */
+ /* XXX SMH: yes it would :-( */
+ #ifdef CONFIG_XEN_BLKDEV_FRONTEND
+ extern void blkdev_suspend(void);
+ extern void blkdev_resume(void);
+ #else
+ #define blkdev_suspend() do{}while(0)
+ #define blkdev_resume() do{}while(0)
+ #endif
+
+ #ifdef CONFIG_XEN_NETDEV_FRONTEND
+ extern void netif_suspend(void);
+ extern void netif_resume(void);
+ #else
+ #define netif_suspend() do{}while(0)
+ #define netif_resume() do{}while(0)
+ #endif
+
+ extern void time_suspend(void);
+ extern void time_resume(void);
+ extern unsigned long max_pfn;
++ extern unsigned int *pfn_to_mfn_frame_list;
+
+ suspend_record = (suspend_record_t *)__get_free_page(GFP_KERNEL);
+ if ( suspend_record == NULL )
+ goto out;
+
+ suspend_record->nr_pfns = max_pfn; /* final number of pfns */
+
+ __cli();
+
+ netif_suspend();
+
+ blkdev_suspend();
+
+ time_suspend();
+
+ ctrl_if_suspend();
+
+ irq_suspend();
+
+ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
+ clear_fixmap(FIX_SHARED_INFO);
+
+ memcpy(&suspend_record->resume_info, &xen_start_info, sizeof(xen_start_info));
+
+ HYPERVISOR_suspend(virt_to_machine(suspend_record) >> PAGE_SHIFT);
+
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_4gb_segments);
+ #ifdef CONFIG_XEN_WRITABLE_PAGETABLES
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_writable_pagetables);
+ #endif
+
+ shutting_down = -1;
+
+ memcpy(&xen_start_info, &suspend_record->resume_info, sizeof(xen_start_info));
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ set_fixmap_ma(FIX_SHARED_INFO, xen_start_info.shared_info);
+ #else
+ set_fixmap(FIX_SHARED_INFO, xen_start_info.shared_info);
+ #endif
+
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+
+ memset(empty_zero_page, 0, PAGE_SIZE);
+
+ for ( i=0, j=0; i < max_pfn; i+=(PAGE_SIZE/sizeof(unsigned long)), j++ )
+ {
+ pfn_to_mfn_frame_list[j] =
+ virt_to_machine(&phys_to_machine_mapping[i]) >> PAGE_SHIFT;
+ }
+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list =
+ virt_to_machine(pfn_to_mfn_frame_list) >> PAGE_SHIFT;
+
+
+ irq_resume();
+
+ ctrl_if_resume();
+
+ time_resume();
+
+ blkdev_resume();
+
+ netif_resume();
+
+ __sti();
+
+ out:
+ if ( suspend_record != NULL )
+ free_page((unsigned long)suspend_record);
+ }
+
+ static int shutdown_process(void *__unused)
+ {
+ static char *envp[] = { "HOME=/", "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
+ static char *restart_argv[] = { "/sbin/shutdown", "-r", "now", NULL };
+ static char *poweroff_argv[] = { "/sbin/halt", "-p", NULL };
+
+ extern asmlinkage long sys_reboot(int magic1, int magic2,
+ unsigned int cmd, void *arg);
+
+ daemonize(
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ "shutdown"
+ #endif
+ );
+
+ switch ( shutting_down )
+ {
+ case CMSG_SHUTDOWN_POWEROFF:
+ if ( execve("/sbin/halt", poweroff_argv, envp) < 0 )
+ {
+ sys_reboot(LINUX_REBOOT_MAGIC1,
+ LINUX_REBOOT_MAGIC2,
+ LINUX_REBOOT_CMD_POWER_OFF,
+ NULL);
+ }
+ break;
+
+ case CMSG_SHUTDOWN_REBOOT:
+ if ( execve("/sbin/shutdown", restart_argv, envp) < 0 )
+ {
+ sys_reboot(LINUX_REBOOT_MAGIC1,
+ LINUX_REBOOT_MAGIC2,
+ LINUX_REBOOT_CMD_RESTART,
+ NULL);
+ }
+ break;
+ }
+
+ shutting_down = -1; /* could try again */
+
+ return 0;
+ }
+
+ static void __shutdown_handler(void *unused)
+ {
+ int err;
+
+ if ( shutting_down != CMSG_SHUTDOWN_SUSPEND )
+ {
+ err = kernel_thread(shutdown_process, NULL, CLONE_FS | CLONE_FILES);
+ if ( err < 0 )
+ printk(KERN_ALERT "Error creating shutdown process!\n");
+ }
+ else
+ {
+ __do_suspend();
+ }
+ }
+
+ static void __sysrq_handler(void *unused)
+ {
+ #ifdef CONFIG_MAGIC_SYSRQ
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ handle_sysrq(pending_sysrq, NULL, NULL);
+ #else
+ handle_sysrq(pending_sysrq, NULL, NULL, NULL);
+ #endif
+ #endif
+ pending_sysrq = -1;
+ }
+
+ static void shutdown_handler(ctrl_msg_t *msg, unsigned long id)
+ {
+ static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
+ static DECLARE_WORK(sysrq_work, __sysrq_handler, NULL);
+
+ if ( (shutting_down == -1) &&
+ ((msg->subtype == CMSG_SHUTDOWN_POWEROFF) ||
+ (msg->subtype == CMSG_SHUTDOWN_REBOOT) ||
+ (msg->subtype == CMSG_SHUTDOWN_SUSPEND)) )
+ {
+ shutting_down = msg->subtype;
+ schedule_work(&shutdown_work);
+ }
+ else if ( (pending_sysrq == -1) &&
+ (msg->subtype == CMSG_SHUTDOWN_SYSRQ) )
+ {
+ pending_sysrq = msg->msg[0];
+ schedule_work(&sysrq_work);
+ }
+ else
+ {
+ printk("Ignoring spurious shutdown request\n");
+ }
+
+ ctrl_if_send_response(msg);
+ }
+
+ static int __init setup_shutdown_event(void)
+ {
+ ctrl_if_register_receiver(CMSG_SHUTDOWN, shutdown_handler, 0);
+ return 0;
+ }
+
+ __initcall(setup_shutdown_event);
--- /dev/null
+
+
+ obj-y += console/
+ obj-y += evtchn/
+ obj-y += balloon/
+
+ obj-$(CONFIG_XEN_PRIVILEGED_GUEST) += privcmd/
+ obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
+ obj-$(CONFIG_XEN_NETDEV_BACKEND) += netback/
+ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += blkfront/
+ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += netfront/
++obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
+
--- /dev/null
- mcl[i].args[0] = MMAP_VADDR(idx, i) >> PAGE_SHIFT;
+ /******************************************************************************
+ * arch/xen/drivers/blkif/backend/main.c
+ *
+ * Back-end of the driver for virtual block devices. This portion of the
+ * driver exports a 'unified' block-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ * arch/xen/drivers/blkif/frontend
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ */
+
+ #include "common.h"
++#include <asm-xen/evtchn.h>
+
+ /*
+ * These are rather arbitrary. They are fairly large because adjacent requests
+ * pulled from a communication ring are quite likely to end up being part of
+ * the same scatter/gather request at the disc.
+ *
+ * ** TRY INCREASING 'MAX_PENDING_REQS' IF WRITE SPEEDS SEEM TOO LOW **
+ * This will increase the chances of being able to write whole tracks.
+ * 64 should be enough to keep us competitive with Linux.
+ */
+ #define MAX_PENDING_REQS 64
+ #define BATCH_PER_DOMAIN 16
+
+ static unsigned long mmap_vstart;
+ #define MMAP_PAGES_PER_REQUEST \
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
+ #define MMAP_PAGES \
+ (MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
+ #define MMAP_VADDR(_req,_seg) \
+ (mmap_vstart + \
+ ((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
+ ((_seg) * PAGE_SIZE))
+
+ /*
+ * Each outstanding request that we've passed to the lower device layers has a
+ * 'pending_req' allocated to it. Each buffer_head that completes decrements
+ * the pendcnt towards zero. When it hits zero, the specified domain has a
+ * response queued for it, with the saved 'id' passed back.
+ */
+ typedef struct {
+ blkif_t *blkif;
+ unsigned long id;
+ int nr_pages;
+ atomic_t pendcnt;
+ unsigned short operation;
+ int status;
+ } pending_req_t;
+
+ /*
+ * We can't allocate pending_req's in order, since they may complete out of
+ * order. We therefore maintain an allocation ring. This ring also indicates
+ * when enough work has been passed down -- at that point the allocation ring
+ * will be empty.
+ */
+ static pending_req_t pending_reqs[MAX_PENDING_REQS];
+ static unsigned char pending_ring[MAX_PENDING_REQS];
+ static spinlock_t pend_prod_lock = SPIN_LOCK_UNLOCKED;
+ /* NB. We use a different index type to differentiate from shared blk rings. */
+ typedef unsigned int PEND_RING_IDX;
+ #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+ static PEND_RING_IDX pending_prod, pending_cons;
+ #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
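With pending_prod initialised to MAX_PENDING_REQS and pending_cons to 0 (as blkif_init() does below), NR_PENDING_REQS starts at 0 and counts requests currently in flight. Illustrative helpers showing the allocate/complete steps that dispatch_rw_block_io() and __end_block_io_op() perform on this ring:

    static int alloc_pending_idx(void)
    {
        /* Caller has already checked NR_PENDING_REQS < MAX_PENDING_REQS. */
        return pending_ring[MASK_PEND_IDX(pending_cons++)];
    }

    static void free_pending_idx(int pending_idx)
    {
        /* The real completion path does this under pend_prod_lock. */
        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
    }
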
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ static kmem_cache_t *buffer_head_cachep;
+ #endif
+
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++/*
++ * If the tap driver is used, we may get pages belonging to either the tap
++ * or (more likely) the real frontend. The backend must nonetheless tell
++ * update_va_mapping which domain a given page belongs to. For the moment,
++ * the tap rewrites the ID field of the request to contain the request index
++ * and the id of the real front end domain.
++ */
++#define BLKTAP_COOKIE 0xbeadfeed
++static inline domid_t ID_TO_DOM(unsigned long id) { return (id >> 16); }
++#endif
++
+ static int do_block_io_op(blkif_t *blkif, int max_to_do);
+ static void dispatch_probe(blkif_t *blkif, blkif_request_t *req);
+ static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req);
+ static void make_response(blkif_t *blkif, unsigned long id,
+ unsigned short op, int st);
+
+ static void fast_flush_area(int idx, int nr_pages)
+ {
+ multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+ int i;
+
+ for ( i = 0; i < nr_pages; i++ )
+ {
+ mcl[i].op = __HYPERVISOR_update_va_mapping;
- blkif_ring_t *blk_ring = blkif->blk_ring_base;
++ mcl[i].args[0] = MMAP_VADDR(idx, i);
+ mcl[i].args[1] = 0;
+ mcl[i].args[2] = 0;
+ }
+
+ mcl[nr_pages-1].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(mcl, nr_pages) != 0) )
+ BUG();
+ }
+
+
+ /******************************************************************
+ * BLOCK-DEVICE SCHEDULER LIST MAINTENANCE
+ */
+
+ static struct list_head blkio_schedule_list;
+ static spinlock_t blkio_schedule_list_lock;
+
+ static int __on_blkdev_list(blkif_t *blkif)
+ {
+ return blkif->blkdev_list.next != NULL;
+ }
+
+ static void remove_from_blkdev_list(blkif_t *blkif)
+ {
+ unsigned long flags;
+ if ( !__on_blkdev_list(blkif) ) return;
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if ( __on_blkdev_list(blkif) )
+ {
+ list_del(&blkif->blkdev_list);
+ blkif->blkdev_list.next = NULL;
+ blkif_put(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+ }
+
+ static void add_to_blkdev_list_tail(blkif_t *blkif)
+ {
+ unsigned long flags;
+ if ( __on_blkdev_list(blkif) ) return;
+ spin_lock_irqsave(&blkio_schedule_list_lock, flags);
+ if ( !__on_blkdev_list(blkif) && (blkif->status == CONNECTED) )
+ {
+ list_add_tail(&blkif->blkdev_list, &blkio_schedule_list);
+ blkif_get(blkif);
+ }
+ spin_unlock_irqrestore(&blkio_schedule_list_lock, flags);
+ }
+
+
+ /******************************************************************
+ * SCHEDULER FUNCTIONS
+ */
+
+ static DECLARE_WAIT_QUEUE_HEAD(blkio_schedule_wait);
+
+ static int blkio_schedule(void *arg)
+ {
+ DECLARE_WAITQUEUE(wq, current);
+
+ blkif_t *blkif;
+ struct list_head *ent;
+
+ daemonize(
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ "xenblkd"
+ #endif
+ );
+
+ for ( ; ; )
+ {
+ /* Wait for work to do. */
+ add_wait_queue(&blkio_schedule_wait, &wq);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if ( (NR_PENDING_REQS == MAX_PENDING_REQS) ||
+ list_empty(&blkio_schedule_list) )
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&blkio_schedule_wait, &wq);
+
+ /* Queue up a batch of requests. */
+ while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&blkio_schedule_list) )
+ {
+ ent = blkio_schedule_list.next;
+ blkif = list_entry(ent, blkif_t, blkdev_list);
+ blkif_get(blkif);
+ remove_from_blkdev_list(blkif);
+ if ( do_block_io_op(blkif, BATCH_PER_DOMAIN) )
+ add_to_blkdev_list_tail(blkif);
+ blkif_put(blkif);
+ }
+
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ /* Push the batch through to disc. */
+ run_task_queue(&tq_disk);
+ #endif
+ }
+ }
+
+ static void maybe_trigger_blkio_schedule(void)
+ {
+ /*
+ * Needed so that two processes, which together make the following predicate
+ * true, don't both read stale values and evaluate the predicate
+ * incorrectly. Incredibly unlikely to stall the scheduler on x86, but...
+ */
+ smp_mb();
+
+ if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&blkio_schedule_list) )
+ wake_up(&blkio_schedule_wait);
+ }
+
+
+
+ /******************************************************************
+ * COMPLETION CALLBACK -- Called as bh->b_end_io()
+ */
+
+ static void __end_block_io_op(pending_req_t *pending_req, int uptodate)
+ {
+ unsigned long flags;
+
+ /* An error fails the entire request. */
+ if ( !uptodate )
+ {
+ DPRINTK("Buffer not up-to-date at end of operation\n");
+ pending_req->status = BLKIF_RSP_ERROR;
+ }
+
+ if ( atomic_dec_and_test(&pending_req->pendcnt) )
+ {
+ int pending_idx = pending_req - pending_reqs;
+ fast_flush_area(pending_idx, pending_req->nr_pages);
+ make_response(pending_req->blkif, pending_req->id,
+ pending_req->operation, pending_req->status);
+ blkif_put(pending_req->blkif);
+ spin_lock_irqsave(&pend_prod_lock, flags);
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&pend_prod_lock, flags);
+ maybe_trigger_blkio_schedule();
+ }
+ }
+
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ static void end_block_io_op(struct buffer_head *bh, int uptodate)
+ {
+ __end_block_io_op(bh->b_private, uptodate);
+ kmem_cache_free(buffer_head_cachep, bh);
+ }
+ #else
+ static int end_block_io_op(struct bio *bio, unsigned int done, int error)
+ {
+ if ( done || error )
+ __end_block_io_op(bio->bi_private, (done && !error));
+ bio_put(bio);
+ return error;
+ }
+ #endif
+
+
+ /******************************************************************************
+ * NOTIFICATION FROM GUEST OS.
+ */
+
+ irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ blkif_t *blkif = dev_id;
+ add_to_blkdev_list_tail(blkif);
+ maybe_trigger_blkio_schedule();
+ return IRQ_HANDLED;
+ }
+
+
+
+ /******************************************************************
+ * DOWNWARD CALLS -- These interface with the block-device layer proper.
+ */
+
+ static int do_block_io_op(blkif_t *blkif, int max_to_do)
+ {
- BLKIF_RING_IDX i, rp;
++ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+ blkif_request_t *req;
- rp = blk_ring->req_prod;
++ RING_IDX i, rp;
+ int more_to_do = 0;
+
- /* Take items off the comms ring, taking care not to overflow. */
- for ( i = blkif->blk_req_cons;
- (i != rp) && ((i-blkif->blk_resp_prod) != BLKIF_RING_SIZE);
++ rp = blk_ring->sring->req_prod;
+ rmb(); /* Ensure we see queued requests up to 'rp'. */
+
- req = &blk_ring->ring[MASK_BLKIF_IDX(i)].req;
++ for ( i = blk_ring->req_cons;
++ (i != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, i);
+ i++ )
+ {
+ if ( (max_to_do-- == 0) || (NR_PENDING_REQS == MAX_PENDING_REQS) )
+ {
+ more_to_do = 1;
+ break;
+ }
+
- blk_ring->ring[i].req.operation);
- make_response(blkif, blk_ring->ring[i].req.id,
- blk_ring->ring[i].req.operation, BLKIF_RSP_ERROR);
++ req = RING_GET_REQUEST(blk_ring, i);
+ switch ( req->operation )
+ {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ dispatch_rw_block_io(blkif, req);
+ break;
+
+ case BLKIF_OP_PROBE:
+ dispatch_probe(blkif, req);
+ break;
+
+ default:
+ DPRINTK("error: unknown block io operation [%d]\n",
- blkif->blk_req_cons = i;
++ req->operation);
++ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+ break;
+ }
+ }
+
- MMAP_VADDR(pending_idx, 0) >> PAGE_SHIFT,
++ blk_ring->req_cons = i;
+ return more_to_do;
+ }
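
do_block_io_op() above is the back-ring counterpart of the front-ring consumer loop: RING_REQUEST_CONS_OVERFLOW() stops the back end consuming more requests than it has free response slots for. The generic shape, with my_back_ring_t and handle_request() as illustrative names:

    static void consume_requests(my_back_ring_t *ring)
    {
        RING_IDX i, rp;

        rp = ring->sring->req_prod;
        rmb();                           /* See queued requests up to 'rp'. */

        for ( i = ring->req_cons;
              (i != rp) && !RING_REQUEST_CONS_OVERFLOW(ring, i);
              i++ )
            handle_request(RING_GET_REQUEST(ring, i));

        ring->req_cons = i;
    }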
+
+ static void dispatch_probe(blkif_t *blkif, blkif_request_t *req)
+ {
+ int rsp = BLKIF_RSP_ERROR;
+ int pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+ /* We expect one buffer only. */
+ if ( unlikely(req->nr_segments != 1) )
+ goto out;
+
+ /* Make sure the buffer is page-sized. */
+ if ( (blkif_first_sect(req->frame_and_sects[0]) != 0) ||
+ (blkif_last_sect(req->frame_and_sects[0]) != 7) )
+ goto out;
+
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++ /* Grab the real frontend out of the probe message. */
++ if (req->frame_and_sects[1] == BLKTAP_COOKIE)
++ blkif->is_blktap = 1;
++#endif
++
++
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
+ if ( HYPERVISOR_update_va_mapping_otherdomain(
- 0, blkif->domid) )
++ MMAP_VADDR(pending_idx, 0),
+ (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
-
++ 0, (blkif->is_blktap ? ID_TO_DOM(req->id) : blkif->domid) ) )
++
+ goto out;
- mcl[i].args[0] = MMAP_VADDR(pending_idx, i) >> PAGE_SHIFT;
++#else
++ if ( HYPERVISOR_update_va_mapping_otherdomain(
++ MMAP_VADDR(pending_idx, 0),
++ (pte_t) { (req->frame_and_sects[0] & PAGE_MASK) | __PAGE_KERNEL },
++ 0, blkif->domid) )
++
++ goto out;
++#endif
++
+ rsp = vbd_probe(blkif, (vdisk_t *)MMAP_VADDR(pending_idx, 0),
+ PAGE_SIZE / sizeof(vdisk_t));
+
+ out:
+ fast_flush_area(pending_idx, 1);
+ make_response(blkif, req->id, req->operation, rsp);
+ }
+
+ static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
+ {
+ extern void ll_rw_block(int rw, int nr, struct buffer_head * bhs[]);
+ int operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
+ short nr_sects;
+ unsigned long buffer, fas;
+ int i, tot_sects, pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+ pending_req_t *pending_req;
+ unsigned long remap_prot;
+ multicall_entry_t mcl[MMAP_PAGES_PER_REQUEST];
+
+ /* We map virtual scatter/gather segments to physical segments. */
+ int new_segs, nr_psegs = 0;
+ phys_seg_t phys_seg[BLKIF_MAX_SEGMENTS_PER_REQUEST + 1];
+
+ /* Check that number of segments is sane. */
+ if ( unlikely(req->nr_segments == 0) ||
+ unlikely(req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST) )
+ {
+ DPRINTK("Bad number of segments in request (%d)\n", req->nr_segments);
+ goto bad_descriptor;
+ }
+
+ /*
+ * Check each address/size pair is sane, and convert into a
+ * physical device and block offset. Note that if the offset and size
+ * crosses a virtual extent boundary, we may end up with more
+ * physical scatter/gather segments than virtual segments.
+ */
+ for ( i = tot_sects = 0; i < req->nr_segments; i++, tot_sects += nr_sects )
+ {
+ fas = req->frame_and_sects[i];
+ buffer = (fas & PAGE_MASK) | (blkif_first_sect(fas) << 9);
+ nr_sects = blkif_last_sect(fas) - blkif_first_sect(fas) + 1;
+
+ if ( nr_sects <= 0 )
+ goto bad_descriptor;
+
+ phys_seg[nr_psegs].dev = req->device;
+ phys_seg[nr_psegs].sector_number = req->sector_number + tot_sects;
+ phys_seg[nr_psegs].buffer = buffer;
+ phys_seg[nr_psegs].nr_sects = nr_sects;
+
+ /* Translate the request into the relevant 'physical device' */
+ new_segs = vbd_translate(&phys_seg[nr_psegs], blkif, operation);
+ if ( new_segs < 0 )
+ {
+ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
+ operation == READ ? "read" : "write",
+ req->sector_number + tot_sects,
+ req->sector_number + tot_sects + nr_sects,
+ req->device);
+ goto bad_descriptor;
+ }
+
+ nr_psegs += new_segs;
+ ASSERT(nr_psegs <= (BLKIF_MAX_SEGMENTS_PER_REQUEST+1));
+ }
+
+ /* Nonsensical zero-sized request? */
+ if ( unlikely(nr_psegs == 0) )
+ goto bad_descriptor;
+
+ if ( operation == READ )
+ remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_RW;
+ else
+ remap_prot = _PAGE_PRESENT|_PAGE_DIRTY|_PAGE_ACCESSED;
+
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ mcl[i].op = __HYPERVISOR_update_va_mapping_otherdomain;
-
++ mcl[i].args[0] = MMAP_VADDR(pending_idx, i);
+ mcl[i].args[1] = (phys_seg[i].buffer & PAGE_MASK) | remap_prot;
+ mcl[i].args[2] = 0;
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++ mcl[i].args[3] = (blkif->is_blktap) ? ID_TO_DOM(req->id) : blkif->domid;
++#else
+ mcl[i].args[3] = blkif->domid;
- resp = &blkif->blk_ring_base->
- ring[MASK_BLKIF_IDX(blkif->blk_resp_prod)].resp;
++#endif
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
+ FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
+ }
+
+ if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
+ BUG();
+
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ if ( unlikely(mcl[i].args[5] != 0) )
+ {
+ DPRINTK("invalid buffer -- could not remap it\n");
+ fast_flush_area(pending_idx, nr_psegs);
+ goto bad_descriptor;
+ }
+ }
+
+ pending_req = &pending_reqs[pending_idx];
+ pending_req->blkif = blkif;
+ pending_req->id = req->id;
+ pending_req->operation = operation;
+ pending_req->status = BLKIF_RSP_OKAY;
+ pending_req->nr_pages = nr_psegs;
+ atomic_set(&pending_req->pendcnt, nr_psegs);
+ pending_cons++;
+
+ blkif_get(blkif);
+
+ /* Now we pass each segment down to the real blkdev layer. */
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ struct buffer_head *bh;
+
+ bh = kmem_cache_alloc(buffer_head_cachep, GFP_ATOMIC);
+ if ( unlikely(bh == NULL) )
+ {
+ __end_block_io_op(pending_req, 0);
+ continue;
+ }
+
+ memset(bh, 0, sizeof (struct buffer_head));
+
+ init_waitqueue_head(&bh->b_wait);
+ bh->b_size = phys_seg[i].nr_sects << 9;
+ bh->b_dev = phys_seg[i].dev;
+ bh->b_rdev = phys_seg[i].dev;
+ bh->b_rsector = (unsigned long)phys_seg[i].sector_number;
+ bh->b_data = (char *)MMAP_VADDR(pending_idx, i) +
+ (phys_seg[i].buffer & ~PAGE_MASK);
+ bh->b_page = virt_to_page(MMAP_VADDR(pending_idx, i));
+ bh->b_end_io = end_block_io_op;
+ bh->b_private = pending_req;
+
+ bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) |
+ (1 << BH_Req) | (1 << BH_Launder);
+ if ( operation == WRITE )
+ bh->b_state |= (1 << BH_JBD) | (1 << BH_Req) | (1 << BH_Uptodate);
+
+ atomic_set(&bh->b_count, 1);
+
+ /* Dispatch a single request. We'll flush it to disc later. */
+ generic_make_request(operation, bh);
+ }
+ #else
+ for ( i = 0; i < nr_psegs; i++ )
+ {
+ struct bio *bio;
+ struct bio_vec *bv;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if ( unlikely(bio == NULL) )
+ {
+ __end_block_io_op(pending_req, 0);
+ continue;
+ }
+
+ bio->bi_bdev = phys_seg[i].bdev;
+ bio->bi_private = pending_req;
+ bio->bi_end_io = end_block_io_op;
+ bio->bi_sector = phys_seg[i].sector_number;
+ bio->bi_rw = operation;
+
+ bv = bio_iovec_idx(bio, 0);
+ bv->bv_page = virt_to_page(MMAP_VADDR(pending_idx, i));
+ bv->bv_len = phys_seg[i].nr_sects << 9;
+ bv->bv_offset = phys_seg[i].buffer & ~PAGE_MASK;
+
+ bio->bi_size = bv->bv_len;
+ bio->bi_vcnt++;
+
+ submit_bio(operation, bio);
+ }
+ #endif
+
+ return;
+
+ bad_descriptor:
+ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+ }
+
+
+
+ /******************************************************************
+ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
+ */
+
+
+ static void make_response(blkif_t *blkif, unsigned long id,
+ unsigned short op, int st)
+ {
+ blkif_response_t *resp;
+ unsigned long flags;
++ blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+
+ /* Place on the response ring for the relevant domain. */
+ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
- blkif->blk_ring_base->resp_prod = ++blkif->blk_resp_prod;
++ resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
+ resp->id = id;
+ resp->operation = op;
+ resp->status = st;
+ wmb(); /* Ensure other side can see the response fields. */
-
++ blk_ring->rsp_prod_pvt++;
++ RING_PUSH_RESPONSES(blk_ring);
+ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+
+ /* Kick the relevant domain. */
+ notify_via_evtchn(blkif->evtchn);
+ }
+
+ void blkif_deschedule(blkif_t *blkif)
+ {
+ remove_from_blkdev_list(blkif);
+ }
+
+ static int __init blkif_init(void)
+ {
+ int i;
+
+ if ( !(xen_start_info.flags & SIF_INITDOMAIN) &&
+ !(xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+ return 0;
+
+ blkif_interface_init();
+
+ if ( (mmap_vstart = allocate_empty_lowmem_region(MMAP_PAGES)) == 0 )
+ BUG();
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ memset(pending_reqs, 0, sizeof(pending_reqs));
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ pending_ring[i] = i;
+
+ spin_lock_init(&blkio_schedule_list_lock);
+ INIT_LIST_HEAD(&blkio_schedule_list);
+
+ if ( kernel_thread(blkio_schedule, 0, CLONE_FS | CLONE_FILES) < 0 )
+ BUG();
+
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ buffer_head_cachep = kmem_cache_create(
+ "buffer_head_cache", sizeof(struct buffer_head),
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ #endif
+
+ blkif_ctrlif_init();
++
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++ printk(KERN_ALERT "NOTE: Blkif backend is running with tap support on!\n");
++#endif
+ return 0;
+ }
+
+ __initcall(blkif_init);
--- /dev/null
- domid_t domid;
- unsigned int handle;
+
+ #ifndef __BLKIF__BACKEND__COMMON_H__
+ #define __BLKIF__BACKEND__COMMON_H__
+
+ #include <linux/config.h>
+ #include <linux/version.h>
+ #include <linux/module.h>
+ #include <linux/rbtree.h>
+ #include <linux/interrupt.h>
+ #include <linux/slab.h>
+ #include <linux/blkdev.h>
+ #include <asm/io.h>
+ #include <asm/setup.h>
+ #include <asm/pgalloc.h>
+ #include <asm-xen/ctrl_if.h>
+ #include <asm-xen/hypervisor.h>
+ #include <asm-xen/xen-public/io/blkif.h>
++#include <asm-xen/xen-public/io/ring.h>
+
+ #if 0
+ #define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+ #define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+ #else
+ #define ASSERT(_p) ((void)0)
+ #define DPRINTK(_f, _a...) ((void)0)
+ #endif
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ typedef struct rb_root rb_root_t;
+ typedef struct rb_node rb_node_t;
+ #else
+ struct block_device;
+ #endif
+
+ typedef struct blkif_st {
+ /* Unique identifier for this interface. */
- unsigned long shmem_frame;
- unsigned int evtchn;
- int irq;
++ domid_t domid;
++ unsigned int handle;
+ /* Physical parameters of the comms window. */
- blkif_ring_t *blk_ring_base; /* ioremap()'ed ptr to shmem_frame. */
- BLKIF_RING_IDX blk_req_cons; /* Request consumer. */
- BLKIF_RING_IDX blk_resp_prod; /* Private version of resp. producer. */
++ unsigned long shmem_frame;
++ unsigned int evtchn;
++ int irq;
+ /* Comms information. */
- rb_root_t vbd_rb; /* Mapping from 16-bit vdevices to VBDs. */
- spinlock_t vbd_lock; /* Protects VBD mapping. */
++ blkif_back_ring_t blk_ring;
+ /* VBDs attached to this interface. */
++ rb_root_t vbd_rb; /* Mapping from 16-bit vdevices to VBDs.*/
++ spinlock_t vbd_lock; /* Protects VBD mapping. */
+ /* Private fields. */
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ /*
+ * DISCONNECT response is deferred until pending requests are ack'ed.
+ * We therefore need to store the id from the original request.
+ */
+ u8 disconnect_rspid;
++#ifdef CONFIG_XEN_BLKDEV_TAP_BE
++ /* Is this a blktap frontend */
++ unsigned int is_blktap;
++#endif
+ struct blkif_st *hash_next;
+ struct list_head blkdev_list;
+ spinlock_t blk_ring_lock;
+ atomic_t refcnt;
+
+ struct work_struct work;
+ } blkif_t;
+
+ void blkif_create(blkif_be_create_t *create);
+ void blkif_destroy(blkif_be_destroy_t *destroy);
+ void blkif_connect(blkif_be_connect_t *connect);
+ int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id);
+ void blkif_disconnect_complete(blkif_t *blkif);
+ blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle);
+ #define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
+ #define blkif_put(_b) \
+ do { \
+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+ blkif_disconnect_complete(_b); \
+ } while (0)
+
+ /* An entry in a list of xen_extents. */
+ typedef struct _blkif_extent_le {
+ blkif_extent_t extent; /* an individual extent */
+ struct _blkif_extent_le *next; /* and a pointer to the next */
+ struct block_device *bdev;
+ } blkif_extent_le_t;
+
+ typedef struct _vbd {
+ blkif_vdev_t vdevice; /* what the domain refers to this vbd as */
+ unsigned char readonly; /* Non-zero -> read-only */
+ unsigned char type; /* VDISK_TYPE_xxx */
+ blkif_extent_le_t *extents; /* list of xen_extents making up this vbd */
+ rb_node_t rb; /* for linking into R-B tree lookup struct */
+ } vbd_t;
+
+ void vbd_create(blkif_be_vbd_create_t *create);
+ void vbd_grow(blkif_be_vbd_grow_t *grow);
+ void vbd_shrink(blkif_be_vbd_shrink_t *shrink);
+ void vbd_destroy(blkif_be_vbd_destroy_t *delete);
+ int vbd_probe(blkif_t *blkif, vdisk_t *vbd_info, int max_vbds);
+ void destroy_all_vbds(blkif_t *blkif);
+
+ /* Describes a [partial] disk extent (part of a block io request) */
+ typedef struct {
+ unsigned short dev;
+ unsigned short nr_sects;
+ struct block_device *bdev;
+ unsigned long buffer;
+ blkif_sector_t sector_number;
+ } phys_seg_t;
+
+ int vbd_translate(phys_seg_t *pseg, blkif_t *blkif, int operation);
+
+ void blkif_interface_init(void);
+ void blkif_ctrlif_init(void);
+
+ void blkif_deschedule(blkif_t *blkif);
+
+ irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+ #endif /* __BLKIF__BACKEND__COMMON_H__ */
--- /dev/null
- vfree(blkif->blk_ring_base);
+ /******************************************************************************
+ * arch/xen/drivers/blkif/backend/interface.c
+ *
+ * Block-device interface management.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+ #include "common.h"
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ #define VMALLOC_VMADDR(x) ((unsigned long)(x))
+ #endif
+
+ #define BLKIF_HASHSZ 1024
+ #define BLKIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(BLKIF_HASHSZ-1))
+
+ static kmem_cache_t *blkif_cachep;
+ static blkif_t *blkif_hash[BLKIF_HASHSZ];
+
+ blkif_t *blkif_find_by_handle(domid_t domid, unsigned int handle)
+ {
+ blkif_t *blkif = blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( (blkif != NULL) &&
+ ((blkif->domid != domid) || (blkif->handle != handle)) )
+ blkif = blkif->hash_next;
+ return blkif;
+ }
+
+ static void __blkif_disconnect_complete(void *arg)
+ {
+ blkif_t *blkif = (blkif_t *)arg;
+ ctrl_msg_t cmsg;
+ blkif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in blkif_disconnect() because at that point there
+ * may be outstanding requests at the disc whose asynchronous responses
+ * must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(blkif->evtchn);
- domid_t domid = connect->domid;
- unsigned int handle = connect->blkif_handle;
- unsigned int evtchn = connect->evtchn;
- unsigned long shmem_frame = connect->shmem_frame;
++ vfree(blkif->blk_ring.sring);
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_BLKIF_BE;
+ cmsg.subtype = CMSG_BLKIF_BE_DISCONNECT;
+ cmsg.id = blkif->disconnect_rspid;
+ cmsg.length = sizeof(blkif_be_disconnect_t);
+ disc.domid = blkif->domid;
+ disc.blkif_handle = blkif->handle;
+ disc.status = BLKIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'blkif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( blkif->status != DISCONNECTING )
+ BUG();
+ blkif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+ }
+
+ void blkif_disconnect_complete(blkif_t *blkif)
+ {
+ INIT_WORK(&blkif->work, __blkif_disconnect_complete, (void *)blkif);
+ schedule_work(&blkif->work);
+ }
+
+ void blkif_create(blkif_be_create_t *create)
+ {
+ domid_t domid = create->domid;
+ unsigned int handle = create->blkif_handle;
+ blkif_t **pblkif, *blkif;
+
+ if ( (blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL)) == NULL )
+ {
+ DPRINTK("Could not create blkif: out of memory\n");
+ create->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ memset(blkif, 0, sizeof(*blkif));
+ blkif->domid = domid;
+ blkif->handle = handle;
+ blkif->status = DISCONNECTED;
+ spin_lock_init(&blkif->vbd_lock);
+ spin_lock_init(&blkif->blk_ring_lock);
+ atomic_set(&blkif->refcnt, 0);
+
+ pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( *pblkif != NULL )
+ {
+ if ( ((*pblkif)->domid == domid) && ((*pblkif)->handle == handle) )
+ {
+ DPRINTK("Could not create blkif: already exists\n");
+ create->status = BLKIF_BE_STATUS_INTERFACE_EXISTS;
+ kmem_cache_free(blkif_cachep, blkif);
+ return;
+ }
+ pblkif = &(*pblkif)->hash_next;
+ }
+
+ blkif->hash_next = *pblkif;
+ *pblkif = blkif;
+
+ DPRINTK("Successfully created blkif\n");
+ create->status = BLKIF_BE_STATUS_OKAY;
+ }
+
+ void blkif_destroy(blkif_be_destroy_t *destroy)
+ {
+ domid_t domid = destroy->domid;
+ unsigned int handle = destroy->blkif_handle;
+ blkif_t **pblkif, *blkif;
+
+ pblkif = &blkif_hash[BLKIF_HASH(domid, handle)];
+ while ( (blkif = *pblkif) != NULL )
+ {
+ if ( (blkif->domid == domid) && (blkif->handle == handle) )
+ {
+ if ( blkif->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pblkif = &blkif->hash_next;
+ }
+
+ destroy->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pblkif = blkif->hash_next;
+ destroy_all_vbds(blkif);
+ kmem_cache_free(blkif_cachep, blkif);
+ destroy->status = BLKIF_BE_STATUS_OKAY;
+ }
+
+ void blkif_connect(blkif_be_connect_t *connect)
+ {
- pgprot_t prot;
- int error;
- blkif_t *blkif;
++ domid_t domid = connect->domid;
++ unsigned int handle = connect->blkif_handle;
++ unsigned int evtchn = connect->evtchn;
++ unsigned long shmem_frame = connect->shmem_frame;
+ struct vm_struct *vma;
-
++ pgprot_t prot;
++ int error;
++ blkif_t *blkif;
++ blkif_sring_t *sring;
+
+ blkif = blkif_find_by_handle(domid, handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("blkif_connect attempted for non-existent blkif (%u,%u)\n",
+ connect->domid, connect->blkif_handle);
+ connect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( (vma = get_vm_area(PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
+ shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ if ( error == -ENOMEM )
+ connect->status = BLKIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT )
+ connect->status = BLKIF_BE_STATUS_MAPPING_ERROR;
+ else
+ connect->status = BLKIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ if ( blkif->status != DISCONNECTED )
+ {
+ connect->status = BLKIF_BE_STATUS_INTERFACE_CONNECTED;
+ vfree(vma->addr);
+ return;
+ }
- blkif->blk_ring_base = (blkif_ring_t *)vma->addr;
++ sring = (blkif_sring_t *)vma->addr;
++ SHARED_RING_INIT(sring);
++ BACK_RING_INIT(&blkif->blk_ring, sring);
++
+ blkif->evtchn = evtchn;
+ blkif->irq = bind_evtchn_to_irq(evtchn);
+ blkif->shmem_frame = shmem_frame;
+ blkif->status = CONNECTED;
+ blkif_get(blkif);
+
+ request_irq(blkif->irq, blkif_be_int, 0, "blkif-backend", blkif);
+
+ connect->status = BLKIF_BE_STATUS_OKAY;
+ }
+
+ int blkif_disconnect(blkif_be_disconnect_t *disconnect, u8 rsp_id)
+ {
+ domid_t domid = disconnect->domid;
+ unsigned int handle = disconnect->blkif_handle;
+ blkif_t *blkif;
+
+ blkif = blkif_find_by_handle(domid, handle);
+ if ( unlikely(blkif == NULL) )
+ {
+ DPRINTK("blkif_disconnect attempted for non-existent blkif"
+ " (%u,%u)\n", disconnect->domid, disconnect->blkif_handle);
+ disconnect->status = BLKIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( blkif->status == CONNECTED )
+ {
+ blkif->status = DISCONNECTING;
+ blkif->disconnect_rspid = rsp_id;
+ wmb(); /* Let other CPUs see the status change. */
+ free_irq(blkif->irq, blkif);
+ blkif_deschedule(blkif);
+ blkif_put(blkif);
+ return 0; /* Caller should not send response message. */
+ }
+
+ disconnect->status = BLKIF_BE_STATUS_OKAY;
+ return 1;
+ }
+
+ void __init blkif_interface_init(void)
+ {
+ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
+ 0, 0, NULL, NULL);
+ memset(blkif_hash, 0, sizeof(blkif_hash));
+ }
--- /dev/null
-static blkif_ring_t *blk_ring = NULL;
-static BLKIF_RING_IDX resp_cons; /* Response consumer for comms ring. */
-static BLKIF_RING_IDX req_prod; /* Private request producer. */
+ /******************************************************************************
+ * blkfront.c
+ *
+ * XenLinux virtual block-device driver.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004, Christian Limpach
++ * Copyright (c) 2004, Andrew Warfield
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #include <linux/version.h>
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ #include "block.h"
+ #else
+ #include "common.h"
+ #include <linux/blk.h>
+ #include <linux/tqueue.h>
+ #endif
+
+ #include <linux/cdrom.h>
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <scsi/scsi.h>
+ #include <asm-xen/ctrl_if.h>
++#include <asm-xen/evtchn.h>
+
+ typedef unsigned char byte; /* from linux/ide.h */
+
+ /* Control whether runtime update of vbds is enabled. */
+ #define ENABLE_VBD_UPDATE 1
+
+ #if ENABLE_VBD_UPDATE
+ static void vbd_update(void);
+ #else
+ static void vbd_update(void){};
+ #endif
+
+ #define BLKIF_STATE_CLOSED 0
+ #define BLKIF_STATE_DISCONNECTED 1
+ #define BLKIF_STATE_CONNECTED 2
+
+ #define WPRINTK(fmt, args...) printk(KERN_WARNING "xen_blk: " fmt, ##args)
+
+ static int blkif_handle = 0;
+ static unsigned int blkif_state = BLKIF_STATE_CLOSED;
+ static unsigned int blkif_evtchn = 0;
+ static unsigned int blkif_irq = 0;
+
+ static int blkif_control_rsp_valid;
+ static blkif_response_t blkif_control_rsp;
+
-blkif_request_t rec_ring[BLKIF_RING_SIZE];
++static blkif_front_ring_t blk_ring;
+
+ unsigned long rec_ring_free;
-/* We plug the I/O ring if the driver is suspended or if the ring is full. */
-#define BLKIF_RING_FULL (((req_prod - resp_cons) == BLKIF_RING_SIZE) || \
- (blkif_state != BLKIF_STATE_CONNECTED))
-
++blkif_request_t rec_ring[RING_SIZE(&blk_ring)];
+
+ static int recovery = 0; /* "Recovery in progress" flag. Protected
+ * by the blkif_io_lock */
+
- if ( free > BLKIF_RING_SIZE )
+ static void kick_pending_request_queues(void);
+
+ int __init xlblk_init(void);
+
+ void blkif_completion( blkif_request_t *req );
+
+ static inline int GET_ID_FROM_FREELIST( void )
+ {
+ unsigned long free = rec_ring_free;
+
- wmb(); /* Ensure that the frontend can see the requests. */
- blk_ring->req_prod = req_prod;
++ if ( free > RING_SIZE(&blk_ring) )
+ BUG();
+
+ rec_ring_free = rec_ring[free].id;
+
+ rec_ring[free].id = 0x0fffffee; /* debug */
+
+ return free;
+ }
+
+ static inline void ADD_ID_TO_FREELIST( unsigned long id )
+ {
+ rec_ring[id].id = rec_ring_free;
+ rec_ring_free = id;
+ }
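GET_ID_FROM_FREELIST()/ADD_ID_TO_FREELIST() above keep the free slots of rec_ring[] threaded through the otherwise unused 'id' field, so allocation and release are O(1) with no extra storage. A minimal standalone sketch of that technique, with illustrative names, is:

/* Free list threaded through an array's 'id' fields. */
#include <assert.h>
#include <stdio.h>

#define N 8

struct entry { unsigned long id; };   /* doubles as "next free index" when free */

static struct entry ring[N];
static unsigned long ring_free;       /* index of the first free entry */

static void freelist_init(void)
{
    unsigned long i;
    ring_free = 0;
    for (i = 0; i < N; i++)
        ring[i].id = i + 1;           /* each free slot points at the next one */
}

static unsigned long get_id(void)
{
    unsigned long free = ring_free;
    assert(free < N);                 /* otherwise the ring is full */
    ring_free = ring[free].id;        /* unlink the head of the free list */
    return free;
}

static void put_id(unsigned long id)
{
    ring[id].id = ring_free;          /* push the slot back onto the free list */
    ring_free = id;
}

int main(void)
{
    freelist_init();
    unsigned long a = get_id(), b = get_id();
    printf("allocated %lu and %lu\n", a, b);
    put_id(a);
    put_id(b);
    return 0;
}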
+
+
+ /************************ COMMON CODE (inlined) ************************/
+
+ /* Kernel-specific definitions used in the common code */
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ #define DISABLE_SCATTERGATHER()
+ #else
+ static int sg_operation = -1;
+ #define DISABLE_SCATTERGATHER() (sg_operation = -1)
+ #endif
+
+ static inline void translate_req_to_pfn(blkif_request_t *xreq,
+ blkif_request_t *req)
+ {
+ int i;
+
+ xreq->operation = req->operation;
+ xreq->nr_segments = req->nr_segments;
+ xreq->device = req->device;
+ /* preserve id */
+ xreq->sector_number = req->sector_number;
+
+ for ( i = 0; i < req->nr_segments; i++ )
+ xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
+ }
+
+ static inline void translate_req_to_mfn(blkif_request_t *xreq,
+ blkif_request_t *req)
+ {
+ int i;
+
+ xreq->operation = req->operation;
+ xreq->nr_segments = req->nr_segments;
+ xreq->device = req->device;
+ xreq->id = req->id; /* copy id (unlike above) */
+ xreq->sector_number = req->sector_number;
+
+ for ( i = 0; i < req->nr_segments; i++ )
+ xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
+ }
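The reason translate_req_to_pfn()/translate_req_to_mfn() exist is that the shadow copy in rec_ring[] is kept in pseudo-physical (pfn) space: machine frame numbers are not stable across save/restore, so a request can only be re-issued after re-translating it to the current mfns. The toy example below, with a hypothetical p2m[] table standing in for phys_to_machine, illustrates that reasoning:

/* pfn-space shadow copies survive a change of pfn->mfn mapping. */
#include <stdint.h>
#include <stdio.h>

#define NFRAMES 4
static uint64_t p2m[NFRAMES] = { 900, 901, 902, 903 };  /* hypothetical pfn -> mfn */

static uint64_t pfn_to_mfn(uint64_t pfn) { return p2m[pfn]; }

int main(void)
{
    uint64_t shadow_pfn = 2;                 /* request segment stored as a pfn */
    printf("before restore: mfn=%llu\n", (unsigned long long)pfn_to_mfn(shadow_pfn));
    p2m[2] = 555;                            /* the mapping changes on restore */
    printf("after restore:  mfn=%llu\n", (unsigned long long)pfn_to_mfn(shadow_pfn));
    return 0;
}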
+
+
+ static inline void flush_requests(void)
+ {
+ DISABLE_SCATTERGATHER();
- ring_req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
++ RING_PUSH_REQUESTS(&blk_ring);
+ notify_via_evtchn(blkif_evtchn);
+ }
+
+
+
+
+ /************************** KERNEL VERSION 2.6 **************************/
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+
+ module_init(xlblk_init);
+
+ #if ENABLE_VBD_UPDATE
+ static void vbd_update(void)
+ {
+ }
+ #endif /* ENABLE_VBD_UPDATE */
+
+ static void kick_pending_request_queues(void)
+ {
+
+ if ( (xlbd_blk_queue != NULL) &&
+ test_bit(QUEUE_FLAG_STOPPED, &xlbd_blk_queue->queue_flags) )
+ {
+ blk_start_queue(xlbd_blk_queue);
+ /* XXXcl call to request_fn should not be needed, but
+ * we get stuck without it... needs investigating
+ */
+ xlbd_blk_queue->request_fn(xlbd_blk_queue);
+ }
+
+ }
+
+
+ int blkif_open(struct inode *inode, struct file *filep)
+ {
+ struct gendisk *gd = inode->i_bdev->bd_disk;
+ struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+ /* Update of usage count is protected by per-device semaphore. */
+ di->mi->usage++;
+
+ return 0;
+ }
+
+
+ int blkif_release(struct inode *inode, struct file *filep)
+ {
+ struct gendisk *gd = inode->i_bdev->bd_disk;
+ struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+ /*
+ * When usage drops to zero it may allow more VBD updates to occur.
+ * Update of usage count is protected by a per-device semaphore.
+ */
+ if (--di->mi->usage == 0) {
+ vbd_update();
+ }
+
+ return 0;
+ }
+
+
+ int blkif_ioctl(struct inode *inode, struct file *filep,
+ unsigned command, unsigned long argument)
+ {
+ int i;
+ /* struct gendisk *gd = inode->i_bdev->bd_disk; */
+
+ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+ command, (long)argument, inode->i_rdev);
+
+ switch (command) {
+
+ case HDIO_GETGEO:
+ /* return ENOSYS to use defaults */
+ return -ENOSYS;
+
+ case CDROMMULTISESSION:
+ DPRINTK("FIXME: support multisession CDs later\n");
+ for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
+ if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
+ return 0;
+
+ default:
+ printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
+ command);
+ return -ENOSYS;
+ }
+
+ return 0;
+ }
+
+ #if 0
+ /* check media change: should probably do something here in some cases :-) */
+ int blkif_check(kdev_t dev)
+ {
+ DPRINTK("blkif_check\n");
+ return 0;
+ }
+
+ int blkif_revalidate(kdev_t dev)
+ {
+ struct block_device *bd;
+ struct gendisk *gd;
+ xen_block_t *disk;
+ unsigned long capacity;
+ int i, rc = 0;
+
+ if ( (bd = bdget(dev)) == NULL )
+ return -EINVAL;
+
+ /*
+ * Update of partition info, and check of usage count, is protected
+ * by the per-block-device semaphore.
+ */
+ down(&bd->bd_sem);
+
+ if ( ((gd = get_gendisk(dev)) == NULL) ||
+ ((disk = xldev_to_xldisk(dev)) == NULL) ||
+ ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ( disk->usage > 1 )
+ {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* Only reread partition table if VBDs aren't mapped to partitions. */
+ if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+ {
+ for ( i = gd->max_p - 1; i >= 0; i-- )
+ {
+ invalidate_device(dev+i, 1);
+ gd->part[MINOR(dev+i)].start_sect = 0;
+ gd->part[MINOR(dev+i)].nr_sects = 0;
+ gd->sizes[MINOR(dev+i)] = 0;
+ }
+
+ grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+ }
+
+ out:
+ up(&bd->bd_sem);
+ bdput(bd);
+ return rc;
+ }
+ #endif
+
+ /*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. This should be a
+ * virtual address in the guest OS.
+ */
+ static int blkif_queue_request(struct request *req)
+ {
+ struct xlbd_disk_info *di =
+ (struct xlbd_disk_info *)req->rq_disk->private_data;
+ unsigned long buffer_ma;
+ blkif_request_t *ring_req;
+ struct bio *bio;
+ struct bio_vec *bvec;
+ int idx;
+ unsigned long id;
+ unsigned int fsect, lsect;
+
+ if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+ return 1;
+
+ /* Fill out a communications ring structure. */
- req_prod++;
-
++ ring_req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
+ id = GET_ID_FROM_FREELIST();
+ rec_ring[id].id = (unsigned long) req;
+
+ ring_req->id = id;
+ ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
+ BLKIF_OP_READ;
+ ring_req->sector_number = (blkif_sector_t)req->sector;
+ ring_req->device = di->xd_device;
+
+ ring_req->nr_segments = 0;
+ rq_for_each_bio(bio, req)
+ {
+ bio_for_each_segment(bvec, bio, idx)
+ {
+ if ( ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST )
+ BUG();
+ buffer_ma = page_to_phys(bvec->bv_page);
+ fsect = bvec->bv_offset >> 9;
+ lsect = fsect + (bvec->bv_len >> 9) - 1;
+ ring_req->frame_and_sects[ring_req->nr_segments++] =
+ buffer_ma | (fsect << 3) | lsect;
+ }
+ }
+
- if ( BLKIF_RING_FULL )
++ blk_ring.req_prod_pvt++;
++
+ /* Keep a private copy so we can reissue requests when recovering. */
+ translate_req_to_pfn(&rec_ring[id], ring_req);
+
+ return 0;
+ }
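Each entry of frame_and_sects[] above packs the page-aligned machine address of the segment together with the first and last 512-byte sector covered within that page (three bits each on a 4 KB page). A small standalone sketch of that encoding, with illustrative helper names, is:

/* Pack/unpack a segment descriptor: page address | (fsect << 3) | lsect. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define PAGE_MASK_  (~0xfffUL)               /* 4 KB pages assumed */

static unsigned long pack_seg(unsigned long buffer_ma,
                              unsigned fsect, unsigned lsect)
{
    assert((buffer_ma & ~PAGE_MASK_) == 0);   /* must be page aligned */
    assert(fsect <= lsect && lsect <= 7);     /* extent stays within one page */
    return buffer_ma | (fsect << 3) | lsect;
}

static void unpack_seg(unsigned long seg, unsigned long *ma,
                       unsigned *fsect, unsigned *lsect)
{
    *ma    = seg & PAGE_MASK_;
    *fsect = (seg >> 3) & 7;
    *lsect = seg & 7;
}

int main(void)
{
    unsigned long ma; unsigned f, l;
    unsigned long seg = pack_seg(0x12345000UL, 2, 5);
    unpack_seg(seg, &ma, &f, &l);
    printf("ma=%#lx fsect=%u lsect=%u\n", ma, f, l);
    return 0;
}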
+
+
+ /*
+ * do_blkif_request
+ * read a block; request is in a request queue
+ */
+ void do_blkif_request(request_queue_t *rq)
+ {
+ struct request *req;
+ int queued;
+
+ DPRINTK("Entered do_blkif_request\n");
+
+ queued = 0;
+
+ while ((req = elv_next_request(rq)) != NULL) {
+ if (!blk_fs_request(req)) {
+ end_request(req, 0);
+ continue;
+ }
+
- BLKIF_RING_IDX i, rp;
++ if ( RING_FULL(&blk_ring) )
+ {
+ blk_stop_queue(rq);
+ break;
+ }
+ DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
+ req, req->cmd, req->sector, req->current_nr_sectors,
+ req->nr_sectors, req->buffer,
+ rq_data_dir(req) ? "write" : "read");
+ blkdev_dequeue_request(req);
+ if (blkif_queue_request(req)) {
+ blk_stop_queue(rq);
+ break;
+ }
+ queued++;
+ }
+
+ if (queued != 0)
+ flush_requests();
+ }
+
+
+ static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+ {
+ struct request *req;
+ blkif_response_t *bret;
-
++ RING_IDX i, rp;
+ unsigned long flags;
-
- rp = blk_ring->resp_prod;
++
+ spin_lock_irqsave(&blkif_io_lock, flags);
+
+ if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) ||
+ unlikely(recovery) )
+ {
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+ return IRQ_HANDLED;
+ }
- for ( i = resp_cons; i != rp; i++ )
++
++ rp = blk_ring.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
- bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
++ for ( i = blk_ring.rsp_cons; i != rp; i++ )
+ {
+ unsigned long id;
-
+
++ bret = RING_GET_RESPONSE(&blk_ring, i);
+ id = bret->id;
+ req = (struct request *)rec_ring[id].id;
-
- resp_cons = i;
+ blkif_completion( &rec_ring[id] );
+
+ ADD_ID_TO_FREELIST(id); /* overwrites req */
+
+ switch ( bret->operation )
+ {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
+ DPRINTK("Bad return from blkdev data request: %x\n",
+ bret->status);
+
+ if ( unlikely(end_that_request_first
+ (req,
+ (bret->status == BLKIF_RSP_OKAY),
+ req->hard_nr_sectors)) )
+ BUG();
+ end_that_request_last(req);
+
+ break;
+ case BLKIF_OP_PROBE:
+ memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+ blkif_control_rsp_valid = 1;
+ break;
+ default:
+ BUG();
+ }
+ }
-
+
++ blk_ring.rsp_cons = i;
++
+ kick_pending_request_queues();
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ return IRQ_HANDLED;
+ }
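blkif_int() above is the consumer-side counterpart of the pattern sketched after make_response(): snapshot the published producer index, issue a read barrier, drain every response up to that snapshot, then record the new consumer index. A minimal standalone sketch, again with illustrative names rather than the real ring.h API, is:

/* Consumer-side drain sketch: snapshot producer, barrier, consume, advance. */
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64u                          /* must be a power of two */

struct rsp { uint64_t id; int status; };

struct rring {
    volatile uint32_t prod;                 /* written by the other end */
    uint32_t cons;                          /* our private consumer index */
    struct rsp slots[NSLOTS];
};

static inline void rmb_(void) { __sync_synchronize(); }  /* stand-in barrier */

static unsigned drain(struct rring *r, void (*handle)(struct rsp *))
{
    uint32_t rp = r->prod;                  /* snapshot the published index */
    rmb_();                                 /* see slot contents up to rp */
    unsigned n = 0;
    for (uint32_t i = r->cons; i != rp; i++, n++)
        handle(&r->slots[i & (NSLOTS - 1)]);
    r->cons = rp;                           /* record what we have consumed */
    return n;
}

static void print_rsp(struct rsp *b)
{
    printf("id=%llu status=%d\n", (unsigned long long)b->id, b->status);
}

int main(void)
{
    static struct rring r;                  /* zero-initialised */
    r.slots[0].id = 7; r.slots[0].status = 0;
    r.prod = 1;                             /* one response published */
    printf("drained %u\n", drain(&r, print_rsp));
    return 0;
}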
+
+ #else
+ /************************** KERNEL VERSION 2.4 **************************/
+
+ static kdev_t sg_dev;
+ static unsigned long sg_next_sect;
+
+ /*
+ * Request queues with outstanding work, but ring is currently full.
+ * We need no special lock here, as we always access this with the
+ * blkif_io_lock held. We only need a small maximum list.
+ */
+ #define MAX_PENDING 8
+ static request_queue_t *pending_queues[MAX_PENDING];
+ static int nr_pending;
+
+
+ #define blkif_io_lock io_request_lock
+
+ /*============================================================================*/
+ #if ENABLE_VBD_UPDATE
+
+ /*
+ * blkif_update_int/update_vbds_task - handle VBD update events.
+ * Schedule a task for keventd to run, which will update the VBDs and perform
+ * the corresponding updates to our view of VBD state.
+ */
+ static void update_vbds_task(void *unused)
+ {
+ xlvbd_update_vbds();
+ }
+
+ static void vbd_update(void)
+ {
+ static struct tq_struct update_tq;
+ update_tq.routine = update_vbds_task;
+ schedule_task(&update_tq);
+ }
+
+ #endif /* ENABLE_VBD_UPDATE */
+ /*============================================================================*/
+
- ((req_prod - resp_cons) < (BLKIF_RING_SIZE >> 1)) )
+ static void kick_pending_request_queues(void)
+ {
+ /* We kick pending request queues if the ring is reasonably empty. */
+ if ( (nr_pending != 0) &&
- while ( (nr_pending != 0) && !BLKIF_RING_FULL )
++ (RING_PENDING_REQUESTS(&blk_ring) <
++ (RING_SIZE(&blk_ring) >> 1)) )
+ {
+ /* Attempt to drain the queue, but bail if the ring becomes full. */
-
- req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod-1)].req;
++ while ( (nr_pending != 0) && !RING_FULL(&blk_ring) )
+ do_blkif_request(pending_queues[--nr_pending]);
+ }
+ }
+
+ int blkif_open(struct inode *inode, struct file *filep)
+ {
+ short xldev = inode->i_rdev;
+ struct gendisk *gd = get_gendisk(xldev);
+ xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+ short minor = MINOR(xldev);
+
+ if ( gd->part[minor].nr_sects == 0 )
+ {
+ /*
+ * Device either doesn't exist, or has zero capacity; we use a few
+ * cheesy heuristics to return the relevant error code
+ */
+ if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
+ ((minor & (gd->max_p - 1)) != 0) )
+ {
+ /*
+ * We have a real device, but no such partition, or we were given just
+ * a partition number, so guess that this is the problem.
+ */
+ return -ENXIO; /* no such device or address */
+ }
+ else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
+ {
+ /* This is a removable device => assume that media is missing. */
+ return -ENOMEDIUM; /* media not present (this is a guess) */
+ }
+ else
+ {
+ /* Just go for the general 'no such device' error. */
+ return -ENODEV; /* no such device */
+ }
+ }
+
+ /* Update of usage count is protected by per-device semaphore. */
+ disk->usage++;
+
+ return 0;
+ }
+
+
+ int blkif_release(struct inode *inode, struct file *filep)
+ {
+ xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
+
+ /*
+ * When usage drops to zero it may allow more VBD updates to occur.
+ * Update of usage count is protected by a per-device semaphore.
+ */
+ if ( --disk->usage == 0 ) {
+ vbd_update();
+ }
+
+ return 0;
+ }
+
+
+ int blkif_ioctl(struct inode *inode, struct file *filep,
+ unsigned command, unsigned long argument)
+ {
+ kdev_t dev = inode->i_rdev;
+ struct hd_geometry *geo = (struct hd_geometry *)argument;
+ struct gendisk *gd;
+ struct hd_struct *part;
+ int i;
+ unsigned short cylinders;
+ byte heads, sectors;
+
+ /* NB. No need to check permissions. That is done for us. */
+
+ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
+ command, (long) argument, dev);
+
+ gd = get_gendisk(dev);
+ part = &gd->part[MINOR(dev)];
+
+ switch ( command )
+ {
+ case BLKGETSIZE:
+ DPRINTK_IOCTL(" BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects);
+ return put_user(part->nr_sects, (unsigned long *) argument);
+
+ case BLKGETSIZE64:
+ DPRINTK_IOCTL(" BLKGETSIZE64: %x %llx\n", BLKGETSIZE64,
+ (u64)part->nr_sects * 512);
+ return put_user((u64)part->nr_sects * 512, (u64 *) argument);
+
+ case BLKRRPART: /* re-read partition table */
+ DPRINTK_IOCTL(" BLKRRPART: %x\n", BLKRRPART);
+ return blkif_revalidate(dev);
+
+ case BLKSSZGET:
+ return hardsect_size[MAJOR(dev)][MINOR(dev)];
+
+ case BLKBSZGET: /* get block size */
+ DPRINTK_IOCTL(" BLKBSZGET: %x\n", BLKBSZGET);
+ break;
+
+ case BLKBSZSET: /* set block size */
+ DPRINTK_IOCTL(" BLKBSZSET: %x\n", BLKBSZSET);
+ break;
+
+ case BLKRASET: /* set read-ahead */
+ DPRINTK_IOCTL(" BLKRASET: %x\n", BLKRASET);
+ break;
+
+ case BLKRAGET: /* get read-ahead */
+ DPRINTK_IOCTL(" BLKRAFET: %x\n", BLKRAGET);
+ break;
+
+ case HDIO_GETGEO:
+ DPRINTK_IOCTL(" HDIO_GETGEO: %x\n", HDIO_GETGEO);
+ if (!argument) return -EINVAL;
+
+ /* We don't have real geometry info, but let's at least return
+ values consistent with the size of the device */
+
+ heads = 0xff;
+ sectors = 0x3f;
+ cylinders = part->nr_sects / (heads * sectors);
+
+ if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT;
+ if (put_user(heads, (byte *)&geo->heads)) return -EFAULT;
+ if (put_user(sectors, (byte *)&geo->sectors)) return -EFAULT;
+ if (put_user(cylinders, (unsigned short *)&geo->cylinders)) return -EFAULT;
+
+ return 0;
+
+ case HDIO_GETGEO_BIG:
+ DPRINTK_IOCTL(" HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
+ if (!argument) return -EINVAL;
+
+ /* We don't have real geometry info, but let's at least return
+ values consistent with the size of the device */
+
+ heads = 0xff;
+ sectors = 0x3f;
+ cylinders = part->nr_sects / (heads * sectors);
+
+ if (put_user(0x00, (unsigned long *) &geo->start)) return -EFAULT;
+ if (put_user(heads, (byte *)&geo->heads)) return -EFAULT;
+ if (put_user(sectors, (byte *)&geo->sectors)) return -EFAULT;
+ if (put_user(cylinders, (unsigned int *) &geo->cylinders)) return -EFAULT;
+
+ return 0;
+
+ case CDROMMULTISESSION:
+ DPRINTK("FIXME: support multisession CDs later\n");
+ for ( i = 0; i < sizeof(struct cdrom_multisession); i++ )
+ if ( put_user(0, (byte *)(argument + i)) ) return -EFAULT;
+ return 0;
+
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ DPRINTK("FIXME: SCSI_IOCTL_GET_BUS_NUMBER ioctl in XL blkif");
+ return -ENOSYS;
+
+ default:
+ printk(KERN_ALERT "ioctl %08x not supported by XL blkif\n", command);
+ return -ENOSYS;
+ }
+
+ return 0;
+ }
+
+
+
+ /* check media change: should probably do something here in some cases :-) */
+ int blkif_check(kdev_t dev)
+ {
+ DPRINTK("blkif_check\n");
+ return 0;
+ }
+
+ int blkif_revalidate(kdev_t dev)
+ {
+ struct block_device *bd;
+ struct gendisk *gd;
+ xl_disk_t *disk;
+ unsigned long capacity;
+ int i, rc = 0;
+
+ if ( (bd = bdget(dev)) == NULL )
+ return -EINVAL;
+
+ /*
+ * Update of partition info, and check of usage count, is protected
+ * by the per-block-device semaphore.
+ */
+ down(&bd->bd_sem);
+
+ if ( ((gd = get_gendisk(dev)) == NULL) ||
+ ((disk = xldev_to_xldisk(dev)) == NULL) ||
+ ((capacity = gd->part[MINOR(dev)].nr_sects) == 0) )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ( disk->usage > 1 )
+ {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* Only reread partition table if VBDs aren't mapped to partitions. */
+ if ( !(gd->flags[MINOR(dev) >> gd->minor_shift] & GENHD_FL_VIRT_PARTNS) )
+ {
+ for ( i = gd->max_p - 1; i >= 0; i-- )
+ {
+ invalidate_device(dev+i, 1);
+ gd->part[MINOR(dev+i)].start_sect = 0;
+ gd->part[MINOR(dev+i)].nr_sects = 0;
+ gd->sizes[MINOR(dev+i)] = 0;
+ }
+
+ grok_partitions(gd, MINOR(dev)>>gd->minor_shift, gd->max_p, capacity);
+ }
+
+ out:
+ up(&bd->bd_sem);
+ bdput(bd);
+ return rc;
+ }
+
+
+ /*
+ * blkif_queue_request
+ *
+ * request block io
+ *
+ * id: for guest use only.
+ * operation: BLKIF_OP_{READ,WRITE,PROBE}
+ * buffer: buffer to read/write into. This should be a
+ * virtual address in the guest OS.
+ */
+ static int blkif_queue_request(unsigned long id,
+ int operation,
+ char * buffer,
+ unsigned long sector_number,
+ unsigned short nr_sectors,
+ kdev_t device)
+ {
+ unsigned long buffer_ma = virt_to_bus(buffer);
+ unsigned long xid;
+ struct gendisk *gd;
+ blkif_request_t *req;
+ struct buffer_head *bh;
+ unsigned int fsect, lsect;
+
+ fsect = (buffer_ma & ~PAGE_MASK) >> 9;
+ lsect = fsect + nr_sectors - 1;
+
+ /* Buffer must be sector-aligned. Extent mustn't cross a page boundary. */
+ if ( unlikely((buffer_ma & ((1<<9)-1)) != 0) )
+ BUG();
+ if ( lsect > 7 )
+ BUG();
+
+ buffer_ma &= PAGE_MASK;
+
+ if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+ return 1;
+
+ switch ( operation )
+ {
+
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ gd = get_gendisk(device);
+
+ /*
+ * Update the sector_number we'll pass down as appropriate; note that
+ * we could sanity-check that the resulting sector lies within this
+ * partition, but the backend driver will do that anyhow.
+ */
+ sector_number += gd->part[MINOR(device)].start_sect;
+
+ /*
+ * If this unit doesn't consist of virtual partitions then we clear
+ * the partn bits from the device number.
+ */
+ if ( !(gd->flags[MINOR(device)>>gd->minor_shift] &
+ GENHD_FL_VIRT_PARTNS) )
+ device &= ~(gd->max_p - 1);
+
+ if ( (sg_operation == operation) &&
+ (sg_dev == device) &&
+ (sg_next_sect == sector_number) )
+ {
- else if ( BLKIF_RING_FULL )
++ req = RING_GET_REQUEST(&blk_ring,
++ blk_ring.req_prod_pvt - 1);
+ bh = (struct buffer_head *)id;
+
+ bh->b_reqnext = (struct buffer_head *)rec_ring[req->id].id;
+
+
+ rec_ring[req->id].id = id;
+
+ req->frame_and_sects[req->nr_segments] =
+ buffer_ma | (fsect<<3) | lsect;
+ if ( ++req->nr_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST )
+ sg_next_sect += nr_sectors;
+ else
+ DISABLE_SCATTERGATHER();
+
+ /* Update the copy of the request in the recovery ring. */
+ translate_req_to_pfn(&rec_ring[req->id], req );
+
+ return 0;
+ }
- req = &blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req;
++ else if ( RING_FULL(&blk_ring) )
+ {
+ return 1;
+ }
+ else
+ {
+ sg_operation = operation;
+ sg_dev = device;
+ sg_next_sect = sector_number + nr_sectors;
+ }
+ break;
+
+ default:
+ panic("unknown op %d\n", operation);
+ }
+
+ /* Fill out a communications ring structure. */
- req_prod++;
-
++ req = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
+
+ xid = GET_ID_FROM_FREELIST();
+ rec_ring[xid].id = id;
+
+ req->id = xid;
+ req->operation = operation;
+ req->sector_number = (blkif_sector_t)sector_number;
+ req->device = device;
+ req->nr_segments = 1;
+ req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
+
- BLKIF_RING_IDX i, rp;
+ /* Keep a private copy so we can reissue requests when recovering. */
+ translate_req_to_pfn(&rec_ring[xid], req );
+
++ blk_ring.req_prod_pvt++;
++
+ return 0;
+ }
+
+
+ /*
+ * do_blkif_request
+ * read a block; request is in a request queue
+ */
+ void do_blkif_request(request_queue_t *rq)
+ {
+ struct request *req;
+ struct buffer_head *bh, *next_bh;
+ int rw, nsect, full, queued = 0;
+
+ DPRINTK("Entered do_blkif_request\n");
+
+ while ( !rq->plugged && !list_empty(&rq->queue_head))
+ {
+ if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL )
+ goto out;
+
+ DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
+ req, req->cmd, req->sector,
+ req->current_nr_sectors, req->nr_sectors, req->bh);
+
+ rw = req->cmd;
+ if ( rw == READA )
+ rw = READ;
+ if ( unlikely((rw != READ) && (rw != WRITE)) )
+ panic("XenoLinux Virtual Block Device: bad cmd: %d\n", rw);
+
+ req->errors = 0;
+
+ bh = req->bh;
+ while ( bh != NULL )
+ {
+ next_bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+
+ full = blkif_queue_request(
+ (unsigned long)bh,
+ (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE,
+ bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
+
+ if ( full )
+ {
+ bh->b_reqnext = next_bh;
+ pending_queues[nr_pending++] = rq;
+ if ( unlikely(nr_pending >= MAX_PENDING) )
+ BUG();
+ goto out;
+ }
+
+ queued++;
+
+ /* Dequeue the buffer head from the request. */
+ nsect = bh->b_size >> 9;
+ bh = req->bh = next_bh;
+
+ if ( bh != NULL )
+ {
+ /* There's another buffer head to do. Update the request. */
+ req->hard_sector += nsect;
+ req->hard_nr_sectors -= nsect;
+ req->sector = req->hard_sector;
+ req->nr_sectors = req->hard_nr_sectors;
+ req->current_nr_sectors = bh->b_size >> 9;
+ req->buffer = bh->b_data;
+ }
+ else
+ {
+ /* That was the last buffer head. Finalise the request. */
+ if ( unlikely(end_that_request_first(req, 1, "XenBlk")) )
+ BUG();
+ blkdev_dequeue_request(req);
+ end_that_request_last(req);
+ }
+ }
+ }
+
+ out:
+ if ( queued != 0 )
+ flush_requests();
+ }
+
+
+ static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+ {
- rp = blk_ring->resp_prod;
++ RING_IDX i, rp;
+ unsigned long flags;
+ struct buffer_head *bh, *next_bh;
+
+ spin_lock_irqsave(&io_request_lock, flags);
+
+ if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
+ {
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ return;
+ }
+
- for ( i = resp_cons; i != rp; i++ )
++ rp = blk_ring.sring->rsp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
- blkif_response_t *bret = &blk_ring->ring[MASK_BLKIF_IDX(i)].resp;
-
++ for ( i = blk_ring.rsp_cons; i != rp; i++ )
+ {
+ unsigned long id;
- resp_cons = i;
-
++ blkif_response_t *bret;
++
++ bret = RING_GET_RESPONSE(&blk_ring, i);
+ id = bret->id;
+ bh = (struct buffer_head *)rec_ring[id].id;
+
+ blkif_completion( &rec_ring[id] );
+
+ ADD_ID_TO_FREELIST(id);
+
+ switch ( bret->operation )
+ {
+ case BLKIF_OP_READ:
+ case BLKIF_OP_WRITE:
+ if ( unlikely(bret->status != BLKIF_RSP_OKAY) )
+ DPRINTK("Bad return from blkdev data request: %lx\n",
+ bret->status);
+ for ( ; bh != NULL; bh = next_bh )
+ {
+ next_bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ bh->b_end_io(bh, bret->status == BLKIF_RSP_OKAY);
+ }
+
+ break;
+ case BLKIF_OP_PROBE:
+ memcpy(&blkif_control_rsp, bret, sizeof(*bret));
+ blkif_control_rsp_valid = 1;
+ break;
+ default:
+ BUG();
+ }
++
+ }
++ blk_ring.rsp_cons = i;
+
- while ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
+ kick_pending_request_queues();
+
+ spin_unlock_irqrestore(&io_request_lock, flags);
+ }
+
+ #endif
+
+ /***************************** COMMON CODE *******************************/
+
+
+ void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
+ {
+ unsigned long flags, id;
++ blkif_request_t *req_d;
+
+ retry:
- if ( (req_prod - resp_cons) == BLKIF_RING_SIZE )
++ while ( RING_FULL(&blk_ring) )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ spin_lock_irqsave(&blkif_io_lock, flags);
- blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req = *req;
++ if ( RING_FULL(&blk_ring) )
+ {
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+ goto retry;
+ }
+
+ DISABLE_SCATTERGATHER();
- blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
++ req_d = RING_GET_REQUEST(&blk_ring, blk_ring.req_prod_pvt);
++ *req_d = *req;
+
+ id = GET_ID_FROM_FREELIST();
- req_prod++;
++ req_d->id = id;
+ rec_ring[id].id = (unsigned long) req;
+
+ translate_req_to_pfn( &rec_ring[id], req );
+
- msg->shmem_frame = (virt_to_machine(blk_ring) >> PAGE_SHIFT);
++ blk_ring.req_prod_pvt++;
+ flush_requests();
+
+ spin_unlock_irqrestore(&blkif_io_lock, flags);
+
+ while ( !blkif_control_rsp_valid )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ memcpy(rsp, &blkif_control_rsp, sizeof(*rsp));
+ blkif_control_rsp_valid = 0;
+ }
+
+
+ /* Send a driver status notification to the domain controller. */
+ static void send_driver_status(int ok)
+ {
+ ctrl_msg_t cmsg = {
+ .type = CMSG_BLKIF_FE,
+ .subtype = CMSG_BLKIF_FE_DRIVER_STATUS,
+ .length = sizeof(blkif_fe_driver_status_t),
+ };
+ blkif_fe_driver_status_t *msg = (void*)cmsg.msg;
+
+ msg->status = (ok ? BLKIF_DRIVER_STATUS_UP : BLKIF_DRIVER_STATUS_DOWN);
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ }
+
+ /* Tell the controller to bring up the interface. */
+ static void blkif_send_interface_connect(void)
+ {
+ ctrl_msg_t cmsg = {
+ .type = CMSG_BLKIF_FE,
+ .subtype = CMSG_BLKIF_FE_INTERFACE_CONNECT,
+ .length = sizeof(blkif_fe_interface_connect_t),
+ };
+ blkif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+
+ msg->handle = 0;
- if ( blk_ring != NULL )
++ msg->shmem_frame = (virt_to_machine(blk_ring.sring) >> PAGE_SHIFT);
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ }
+
+ static void blkif_free(void)
+ {
+ /* Prevent new requests being issued until we fix things up. */
+ spin_lock_irq(&blkif_io_lock);
+ recovery = 1;
+ blkif_state = BLKIF_STATE_DISCONNECTED;
+ spin_unlock_irq(&blkif_io_lock);
+
+ /* Free resources associated with old device channel. */
- free_page((unsigned long)blk_ring);
- blk_ring = NULL;
++ if ( blk_ring.sring != NULL )
+ {
- if ( blk_ring != NULL )
- free_page((unsigned long)blk_ring);
- blk_ring = (blkif_ring_t *)__get_free_page(GFP_KERNEL);
- blk_ring->req_prod = blk_ring->resp_prod = resp_cons = req_prod = 0;
++ free_page((unsigned long)blk_ring.sring);
++ blk_ring.sring = NULL;
+ }
+ free_irq(blkif_irq, NULL);
+ blkif_irq = 0;
+
+ unbind_evtchn_from_irq(blkif_evtchn);
+ blkif_evtchn = 0;
+ }
+
+ static void blkif_close(void)
+ {
+ }
+
+ /* Move from CLOSED to DISCONNECTED state. */
+ static void blkif_disconnect(void)
+ {
- for ( i = 0; i < BLKIF_RING_SIZE; i++ )
++ blkif_sring_t *sring;
++
++ if ( blk_ring.sring != NULL )
++ free_page((unsigned long)blk_ring.sring);
++
++ sring = (blkif_sring_t *)__get_free_page(GFP_KERNEL);
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&blk_ring, sring);
+ blkif_state = BLKIF_STATE_DISCONNECTED;
+ blkif_send_interface_connect();
+ }
+
+ static void blkif_reset(void)
+ {
+ blkif_free();
+ blkif_disconnect();
+ }
+
+ static void blkif_recover(void)
+ {
+ int i;
++ blkif_request_t *req;
+
+ /* Hmm, requests might be re-ordered when we re-issue them.
+ * This will need to be fixed once we have barriers */
+
+ /* Stage 1 : Find active and move to safety. */
- translate_req_to_mfn(
- &blk_ring->ring[req_prod].req, &rec_ring[i]);
- req_prod++;
++ for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
+ {
+ if ( rec_ring[i].id >= PAGE_OFFSET )
+ {
- for ( i = 0; i < req_prod; i++ )
++ req = RING_GET_REQUEST(&blk_ring,
++ blk_ring.req_prod_pvt);
++ translate_req_to_mfn(req, &rec_ring[i]);
++ blk_ring.req_prod_pvt++;
+ }
+ }
+
+ /* Stage 2 : Set up shadow list. */
- rec_ring[i].id = blk_ring->ring[i].req.id;
- blk_ring->ring[i].req.id = i;
- translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
++ for ( i = 0; i < blk_ring.req_prod_pvt; i++ )
+ {
- for ( ; i < BLKIF_RING_SIZE; i++ )
++ req = RING_GET_REQUEST(&blk_ring, i);
++ rec_ring[i].id = req->id;
++ req->id = i;
++ translate_req_to_pfn(&rec_ring[i], req);
+ }
+
+ /* Stage 3 : Set up free list. */
- rec_ring_free = req_prod;
- rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
++ for ( ; i < RING_SIZE(&blk_ring); i++ )
+ rec_ring[i].id = i+1;
- WPRINTK(" Invalid blkif: handle=%u", status->handle);
++ rec_ring_free = blk_ring.req_prod_pvt;
++ rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
+
+ /* The shared ring's req_prod will be set when we flush_requests(). */
+ wmb();
+
+ /* Switch off recovery mode, using a memory barrier to ensure that
+ * it's seen before we flush requests - we don't want to miss any
+ * interrupts. */
+ recovery = 0;
+ wmb();
+
+ /* Kicks things back into life. */
+ flush_requests();
+
+ /* Now safe to let other people use the interface. */
+ blkif_state = BLKIF_STATE_CONNECTED;
+ }
+
+ static void blkif_connect(blkif_fe_interface_status_t *status)
+ {
+ int err = 0;
+
+ blkif_evtchn = status->evtchn;
+ blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
+
+ err = request_irq(blkif_irq, blkif_int, SA_SAMPLE_RANDOM, "blkif", NULL);
+ if ( err )
+ {
+ printk(KERN_ALERT "xen_blk: request_irq failed (err=%d)\n", err);
+ return;
+ }
+
+ if ( recovery )
+ {
+ blkif_recover();
+ }
+ else
+ {
+ /* Transition to connected in case we need to do
+ * a partition probe on a whole disk. */
+ blkif_state = BLKIF_STATE_CONNECTED;
+
+ /* Probe for discs attached to the interface. */
+ xlvbd_init();
+ }
+
+ /* Kick pending requests. */
+ spin_lock_irq(&blkif_io_lock);
+ kick_pending_request_queues();
+ spin_unlock_irq(&blkif_io_lock);
+ }
+
+ static void unexpected(blkif_fe_interface_status_t *status)
+ {
+ DPRINTK(" Unexpected blkif status %u in state %u\n",
+ status->status, blkif_state);
+ }
+
+ static void blkif_status(blkif_fe_interface_status_t *status)
+ {
+ if ( status->handle != blkif_handle )
+ {
- for ( i = 0; i < BLKIF_RING_SIZE; i++ )
++ WPRINTK(" Invalid blkif: handle=%u\n", status->handle);
++ unexpected(status);
+ return;
+ }
+
+ switch ( status->status )
+ {
+ case BLKIF_INTERFACE_STATUS_CLOSED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ unexpected(status);
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ case BLKIF_STATE_CONNECTED:
+ unexpected(status);
+ blkif_close();
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_DISCONNECTED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ blkif_disconnect();
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ case BLKIF_STATE_CONNECTED:
+ /* unexpected(status); */ /* occurs during suspend/resume */
+ blkif_reset();
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_CONNECTED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ unexpected(status);
+ blkif_disconnect();
+ blkif_connect(status);
+ break;
+ case BLKIF_STATE_DISCONNECTED:
+ blkif_connect(status);
+ break;
+ case BLKIF_STATE_CONNECTED:
+ unexpected(status);
+ blkif_connect(status);
+ break;
+ }
+ break;
+
+ case BLKIF_INTERFACE_STATUS_CHANGED:
+ switch ( blkif_state )
+ {
+ case BLKIF_STATE_CLOSED:
+ case BLKIF_STATE_DISCONNECTED:
+ unexpected(status);
+ break;
+ case BLKIF_STATE_CONNECTED:
+ vbd_update();
+ break;
+ }
+ break;
+
+ default:
+ WPRINTK(" Invalid blkif status: %d\n", status->status);
+ break;
+ }
+ }
+
+
+ static void blkif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+ {
+ switch ( msg->subtype )
+ {
+ case CMSG_BLKIF_FE_INTERFACE_STATUS:
+ if ( msg->length != sizeof(blkif_fe_interface_status_t) )
+ goto parse_error;
+ blkif_status((blkif_fe_interface_status_t *)
+ &msg->msg[0]);
+ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+ }
+
+ int wait_for_blkif(void)
+ {
+ int err = 0;
+ int i;
+ send_driver_status(1);
+
+ /*
+ * We should read 'nr_interfaces' from the response message and wait
+ * for notifications before proceeding. For now we assume that we
+ * will be notified of exactly one interface.
+ */
+ for ( i=0; (blkif_state != BLKIF_STATE_CONNECTED) && (i < 10*HZ); i++ )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(1);
+ }
+
+ if ( blkif_state != BLKIF_STATE_CONNECTED )
+ {
+ printk(KERN_INFO "xen_blk: Timeout connecting to device!\n");
+ err = -ENOSYS;
+ }
+ return err;
+ }
+
+ int __init xlblk_init(void)
+ {
+ int i;
+
+ if ( (xen_start_info.flags & SIF_INITDOMAIN) ||
+ (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
+ return 0;
+
+ printk(KERN_INFO "xen_blk: Initialising virtual block device driver\n");
+
+ rec_ring_free = 0;
- rec_ring[BLKIF_RING_SIZE-1].id = 0x0fffffff;
++ for ( i = 0; i < RING_SIZE(&blk_ring); i++ )
+ rec_ring[i].id = i+1;
++ rec_ring[RING_SIZE(&blk_ring)-1].id = 0x0fffffff;
+
+ (void)ctrl_if_register_receiver(CMSG_BLKIF_FE, blkif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ wait_for_blkif();
+
+ return 0;
+ }
+
+ void blkdev_suspend(void)
+ {
+ }
+
+ void blkdev_resume(void)
+ {
+ send_driver_status(1);
+ }
+
+ /* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
+
+ void blkif_completion(blkif_request_t *req)
+ {
+ int i;
+
+ switch ( req->operation )
+ {
+ case BLKIF_OP_READ:
+ for ( i = 0; i < req->nr_segments; i++ )
+ {
+ unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
+ unsigned long mfn = phys_to_machine_mapping[pfn];
+ xen_machphys_update(mfn, pfn);
+ }
+ break;
+ }
+
+ }
--- /dev/null
+ /******************************************************************************
+ * block.h
+ *
+ * Shared definitions between all levels of XenLinux Virtual block devices.
+ *
+ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
+ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
+ * Copyright (c) 2004-2005, Christian Limpach
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #ifndef __XEN_DRIVERS_BLOCK_H__
+ #define __XEN_DRIVERS_BLOCK_H__
+
+ #include <linux/config.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
+ #include <linux/hdreg.h>
+ #include <linux/blkdev.h>
+ #include <linux/major.h>
+ #include <linux/devfs_fs_kernel.h>
+ #include <asm-xen/xen-public/xen.h>
+ #include <asm-xen/xen-public/io/blkif.h>
++#include <asm-xen/xen-public/io/ring.h>
+ #include <asm/io.h>
+ #include <asm/atomic.h>
+ #include <asm/uaccess.h>
+
+ #if 0
+ #define DPRINTK(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+ #else
+ #define DPRINTK(_f, _a...) ((void)0)
+ #endif
+
+ #if 0
+ #define DPRINTK_IOCTL(_f, _a...) printk ( KERN_ALERT _f , ## _a )
+ #else
+ #define DPRINTK_IOCTL(_f, _a...) ((void)0)
+ #endif
+
+ struct xlbd_type_info {
+ int partn_shift;
+ int partn_per_major;
+ int devs_per_major;
+ int hardsect_size;
+ int max_sectors;
+ char *devname;
+ char *diskname;
+ };
+
+ /*
+ * We have one of these per vbd, whether ide, scsi or 'other'. They
+ * hang in private_data off the gendisk structure. We may end up
+ * putting all kinds of interesting stuff here :-)
+ */
+ struct xlbd_major_info {
+ int major;
+ int index;
+ int usage;
+ struct xlbd_type_info *type;
+ };
+
+ struct xlbd_disk_info {
+ int xd_device;
+ struct xlbd_major_info *mi;
+ };
+
+ typedef struct xen_block {
+ int usage;
+ } xen_block_t;
+
+ extern struct request_queue *xlbd_blk_queue;
+ extern spinlock_t blkif_io_lock;
+
+ extern int blkif_open(struct inode *inode, struct file *filep);
+ extern int blkif_release(struct inode *inode, struct file *filep);
+ extern int blkif_ioctl(struct inode *inode, struct file *filep,
+ unsigned command, unsigned long argument);
+ extern int blkif_check(dev_t dev);
+ extern int blkif_revalidate(dev_t dev);
+ extern void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp);
+ extern void do_blkif_request (request_queue_t *rq);
+
+ extern void xlvbd_update_vbds(void);
+
+ /* Virtual block-device subsystem. */
+ extern int xlvbd_init(void);
+ extern void xlvbd_cleanup(void);
+
+ #endif /* __XEN_DRIVERS_BLOCK_H__ */
--- /dev/null
-# define RING_SIZE 2048 /* 2048 16-bit entries */
-# define RING_MASK(_i) ((_i)&(RING_SIZE-1))
+ /******************************************************************************
+ * evtchn.c
+ *
+ * Xenolinux driver for receiving and demuxing event-channel signals.
+ *
+ * Copyright (c) 2004, K A Fraser
+ * Multi-process extensions Copyright (c) 2004, Steven Smith
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #include <linux/config.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+ #include <linux/fs.h>
+ #include <linux/errno.h>
+ #include <linux/miscdevice.h>
+ #include <linux/major.h>
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/poll.h>
+ #include <linux/irq.h>
+ #include <linux/init.h>
++#define XEN_EVTCHN_MASK_OPS
+ #include <asm-xen/evtchn.h>
+
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ #include <linux/devfs_fs_kernel.h>
+ #define OLD_DEVFS
+ #else
+ #include <linux/gfp.h>
+ #endif
+
+ #ifdef OLD_DEVFS
+ /* NB. This must be shared amongst drivers if more things go in /dev/xen */
+ static devfs_handle_t xen_dev_dir;
+ #endif
+
+ struct per_user_data {
+ /* Notification ring, accessed via /dev/xen/evtchn. */
- if ( (u->ring_prod - u->ring_cons) < RING_SIZE )
++# define EVTCHN_RING_SIZE 2048 /* 2048 16-bit entries */
++# define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
+ u16 *ring;
+ unsigned int ring_cons, ring_prod, ring_overflow;
+
+ /* Processes wait on this queue when ring is empty. */
+ wait_queue_head_t evtchn_wait;
+ struct fasync_struct *evtchn_async_queue;
+ };
+
+ /* Who's bound to each port? */
+ static struct per_user_data *port_user[NR_EVENT_CHANNELS];
+ static spinlock_t port_user_lock;
+
+ void evtchn_device_upcall(int port)
+ {
+ struct per_user_data *u;
+
+ spin_lock(&port_user_lock);
+
+ mask_evtchn(port);
+ clear_evtchn(port);
+
+ if ( (u = port_user[port]) != NULL )
+ {
- u->ring[RING_MASK(u->ring_prod)] = (u16)port;
++ if ( (u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE )
+ {
- if ( ((c ^ p) & RING_SIZE) != 0 )
++ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = (u16)port;
+ if ( u->ring_cons == u->ring_prod++ )
+ {
+ wake_up_interruptible(&u->evtchn_wait);
+ kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN);
+ }
+ }
+ else
+ {
+ u->ring_overflow = 1;
+ }
+ }
+
+ spin_unlock(&port_user_lock);
+ }
+
+ static ssize_t evtchn_read(struct file *file, char *buf,
+ size_t count, loff_t *ppos)
+ {
+ int rc;
+ unsigned int c, p, bytes1 = 0, bytes2 = 0;
+ DECLARE_WAITQUEUE(wait, current);
+ struct per_user_data *u = file->private_data;
+
+ add_wait_queue(&u->evtchn_wait, &wait);
+
+ count &= ~1; /* even number of bytes */
+
+ if ( count == 0 )
+ {
+ rc = 0;
+ goto out;
+ }
+
+ if ( count > PAGE_SIZE )
+ count = PAGE_SIZE;
+
+ for ( ; ; )
+ {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if ( (c = u->ring_cons) != (p = u->ring_prod) )
+ break;
+
+ if ( u->ring_overflow )
+ {
+ rc = -EFBIG;
+ goto out;
+ }
+
+ if ( file->f_flags & O_NONBLOCK )
+ {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if ( signal_pending(current) )
+ {
+ rc = -ERESTARTSYS;
+ goto out;
+ }
+
+ schedule();
+ }
+
+ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
- bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(u16);
- bytes2 = RING_MASK(p) * sizeof(u16);
++ if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 )
+ {
- if ( copy_to_user(buf, &u->ring[RING_MASK(c)], bytes1) ||
++ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(u16);
++ bytes2 = EVTCHN_RING_MASK(p) * sizeof(u16);
+ }
+ else
+ {
+ bytes1 = (p - c) * sizeof(u16);
+ bytes2 = 0;
+ }
+
+ /* Truncate chunks according to caller's maximum byte count. */
+ if ( bytes1 > count )
+ {
+ bytes1 = count;
+ bytes2 = 0;
+ }
+ else if ( (bytes1 + bytes2) > count )
+ {
+ bytes2 = count - bytes1;
+ }
+
++ if ( copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
+ ((bytes2 != 0) && copy_to_user(&buf[bytes1], &u->ring[0], bytes2)) )
+ {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ u->ring_cons += (bytes1 + bytes2) / sizeof(u16);
+
+ rc = bytes1 + bytes2;
+
+ out:
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&u->evtchn_wait, &wait);
+ return rc;
+ }
+
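+ /*
+ * Write accepts a batch of 16-bit port numbers and re-enables (unmasks)
+ * each port that is bound to the writing process.
+ */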
+ static ssize_t evtchn_write(struct file *file, const char *buf,
+ size_t count, loff_t *ppos)
+ {
+ int rc, i;
+ u16 *kbuf = (u16 *)__get_free_page(GFP_KERNEL);
+ struct per_user_data *u = file->private_data;
+
+ if ( kbuf == NULL )
+ return -ENOMEM;
+
+ count &= ~1; /* even number of bytes */
+
+ if ( count == 0 )
+ {
+ rc = 0;
+ goto out;
+ }
+
+ if ( count > PAGE_SIZE )
+ count = PAGE_SIZE;
+
+ if ( copy_from_user(kbuf, buf, count) != 0 )
+ {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ spin_lock_irq(&port_user_lock);
+ for ( i = 0; i < (count/2); i++ )
+ if ( (kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u) )
+ unmask_evtchn(kbuf[i]);
+ spin_unlock_irq(&port_user_lock);
+
+ rc = count;
+
+ out:
+ free_page((unsigned long)kbuf);
+ return rc;
+ }
+
+ static int evtchn_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+ {
+ int rc = 0;
+ struct per_user_data *u = file->private_data;
+
+ spin_lock_irq(&port_user_lock);
+
+ switch ( cmd )
+ {
+ case EVTCHN_RESET:
+ /* Initialise the ring to empty. Clear errors. */
+ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
+ break;
+
+ case EVTCHN_BIND:
+ if ( arg >= NR_EVENT_CHANNELS )
+ {
+ rc = -EINVAL;
+ }
+ else if ( port_user[arg] != NULL )
+ {
+ rc = -EISCONN;
+ }
+ else
+ {
+ port_user[arg] = u;
+ unmask_evtchn(arg);
+ }
+ break;
+
+ case EVTCHN_UNBIND:
+ if ( arg >= NR_EVENT_CHANNELS )
+ {
+ rc = -EINVAL;
+ }
+ else if ( port_user[arg] != u )
+ {
+ rc = -ENOTCONN;
+ }
+ else
+ {
+ port_user[arg] = NULL;
+ mask_evtchn(arg);
+ }
+ break;
+
+ default:
+ rc = -ENOSYS;
+ break;
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ return rc;
+ }
+
+ static unsigned int evtchn_poll(struct file *file, poll_table *wait)
+ {
+ unsigned int mask = POLLOUT | POLLWRNORM;
+ struct per_user_data *u = file->private_data;
+
+ poll_wait(file, &u->evtchn_wait, wait);
+ if ( u->ring_cons != u->ring_prod )
+ mask |= POLLIN | POLLRDNORM;
+ if ( u->ring_overflow )
+ mask = POLLERR;
+ return mask;
+ }
+
+ static int evtchn_fasync(int fd, struct file *filp, int on)
+ {
+ struct per_user_data *u = filp->private_data;
+ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
+ }
+
+ static int evtchn_open(struct inode *inode, struct file *filp)
+ {
+ struct per_user_data *u;
+
+ if ( (u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL )
+ return -ENOMEM;
+
+ memset(u, 0, sizeof(*u));
+ init_waitqueue_head(&u->evtchn_wait);
+
+ if ( (u->ring = (u16 *)__get_free_page(GFP_KERNEL)) == NULL )
+ {
+ kfree(u);
+ return -ENOMEM;
+ }
+
+ filp->private_data = u;
+
+ return 0;
+ }
+
+ static int evtchn_release(struct inode *inode, struct file *filp)
+ {
+ int i;
+ struct per_user_data *u = filp->private_data;
+
+ spin_lock_irq(&port_user_lock);
+
+ free_page((unsigned long)u->ring);
+
+ for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
+ {
+ if ( port_user[i] == u )
+ {
+ port_user[i] = NULL;
+ mask_evtchn(i);
+ }
+ }
+
+ spin_unlock_irq(&port_user_lock);
+
+ return 0;
+ }
+
+ static struct file_operations evtchn_fops = {
+ owner: THIS_MODULE,
+ read: evtchn_read,
+ write: evtchn_write,
+ ioctl: evtchn_ioctl,
+ poll: evtchn_poll,
+ fasync: evtchn_fasync,
+ open: evtchn_open,
+ release: evtchn_release
+ };
+
+ static struct miscdevice evtchn_miscdev = {
+ .minor = EVTCHN_MINOR,
+ .name = "evtchn",
+ .fops = &evtchn_fops,
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ .devfs_name = "misc/evtchn",
+ #endif
+ };
+
+ static int __init evtchn_init(void)
+ {
+ #ifdef OLD_DEVFS
+ devfs_handle_t symlink_handle;
+ int pos;
+ char link_dest[64];
+ #endif
+ int err;
+
+ spin_lock_init(&port_user_lock);
+ memset(port_user, 0, sizeof(port_user));
+
+ /* (DEVFS) create '/dev/misc/evtchn'. */
+ err = misc_register(&evtchn_miscdev);
+ if ( err != 0 )
+ {
+ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
+ return err;
+ }
+
+ #ifdef OLD_DEVFS
+ /* (DEVFS) create directory '/dev/xen'. */
+ xen_dev_dir = devfs_mk_dir(NULL, "xen", NULL);
+
+ /* (DEVFS) &link_dest[pos] == '../misc/evtchn'. */
+ pos = devfs_generate_path(evtchn_miscdev.devfs_handle,
+ &link_dest[3],
+ sizeof(link_dest) - 3);
+ if ( pos >= 0 )
+ strncpy(&link_dest[pos], "../", 3);
+
+ /* (DEVFS) symlink '/dev/xen/evtchn' -> '../misc/evtchn'. */
+ (void)devfs_mk_symlink(xen_dev_dir,
+ "evtchn",
+ DEVFS_FL_DEFAULT,
+ &link_dest[pos],
+ &symlink_handle,
+ NULL);
+
+ /* (DEVFS) automatically destroy the symlink with its destination. */
+ devfs_auto_unregister(evtchn_miscdev.devfs_handle, symlink_handle);
+ #endif
+
+ printk("Event-channel device installed.\n");
+
+ return 0;
+ }
+
+ static void evtchn_cleanup(void)
+ {
+ misc_deregister(&evtchn_miscdev);
+ }
+
+ module_init(evtchn_init);
+ module_exit(evtchn_cleanup);
--- /dev/null
+ /******************************************************************************
+ * arch/xen/drivers/netif/backend/common.h
+ */
+
+ #ifndef __NETIF__BACKEND__COMMON_H__
+ #define __NETIF__BACKEND__COMMON_H__
+
+ #include <linux/config.h>
+ #include <linux/version.h>
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/slab.h>
+ #include <linux/ip.h>
+ #include <linux/in.h>
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <asm-xen/ctrl_if.h>
+ #include <asm-xen/xen-public/io/netif.h>
+ #include <asm/io.h>
+ #include <asm/pgalloc.h>
+
+ #if 0
+ #define ASSERT(_p) \
+ if ( !(_p) ) { printk("Assertion '%s' failed, line %d, file %s", #_p , \
+ __LINE__, __FILE__); *(int*)0=0; }
+ #define DPRINTK(_f, _a...) printk(KERN_ALERT "(file=%s, line=%d) " _f, \
+ __FILE__ , __LINE__ , ## _a )
+ #else
+ #define ASSERT(_p) ((void)0)
+ #define DPRINTK(_f, _a...) ((void)0)
+ #endif
+
+ typedef struct netif_st {
+ /* Unique identifier for this interface. */
+ domid_t domid;
+ unsigned int handle;
+
++ u8 fe_dev_addr[6];
++
+ /* Physical parameters of the comms window. */
+ unsigned long tx_shmem_frame;
+ unsigned long rx_shmem_frame;
+ unsigned int evtchn;
+ int irq;
+
+ /* The shared rings and indexes. */
+ netif_tx_interface_t *tx;
+ netif_rx_interface_t *rx;
+
+ /* Private indexes into shared ring. */
+ NETIF_RING_IDX rx_req_cons;
+ NETIF_RING_IDX rx_resp_prod; /* private version of shared variable */
+ NETIF_RING_IDX tx_req_cons;
+ NETIF_RING_IDX tx_resp_prod; /* private version of shared variable */
+
+ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
+ unsigned long credit_bytes;
+ unsigned long credit_usec;
+ unsigned long remaining_credit;
+ struct timer_list credit_timeout;
+
+ /* Miscellaneous private stuff. */
+ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
+ int active;
+ /*
+ * DISCONNECT response is deferred until pending requests are ack'ed.
+ * We therefore need to store the id from the original request.
+ */
+ u8 disconnect_rspid;
+ struct netif_st *hash_next;
+ struct list_head list; /* scheduling list */
+ atomic_t refcnt;
+ struct net_device *dev;
+ struct net_device_stats stats;
+
+ struct work_struct work;
+ } netif_t;
+
+ void netif_create(netif_be_create_t *create);
+ void netif_destroy(netif_be_destroy_t *destroy);
++void netif_creditlimit(netif_be_creditlimit_t *creditlimit);
+ void netif_connect(netif_be_connect_t *connect);
+ int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id);
+ void netif_disconnect_complete(netif_t *netif);
+ netif_t *netif_find_by_handle(domid_t domid, unsigned int handle);
+ #define netif_get(_b) (atomic_inc(&(_b)->refcnt))
+ #define netif_put(_b) \
+ do { \
+ if ( atomic_dec_and_test(&(_b)->refcnt) ) \
+ netif_disconnect_complete(_b); \
+ } while (0)
+
+ void netif_interface_init(void);
+ void netif_ctrlif_init(void);
+
+ void netif_schedule_work(netif_t *netif);
+ void netif_deschedule_work(netif_t *netif);
+
+ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev);
+ irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
+
+ #endif /* __NETIF__BACKEND__COMMON_H__ */
--- /dev/null
- break;
+ /******************************************************************************
+ * arch/xen/drivers/netif/backend/control.c
+ *
+ * Routines for interfacing with the control plane.
+ *
+ * Copyright (c) 2004, Keir Fraser
+ */
+
+ #include "common.h"
+
+ static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+ {
+ switch ( msg->subtype )
+ {
+ case CMSG_NETIF_BE_CREATE:
+ if ( msg->length != sizeof(netif_be_create_t) )
+ goto parse_error;
+ netif_create((netif_be_create_t *)&msg->msg[0]);
+ break;
+ case CMSG_NETIF_BE_DESTROY:
+ if ( msg->length != sizeof(netif_be_destroy_t) )
+ goto parse_error;
+ netif_destroy((netif_be_destroy_t *)&msg->msg[0]);
- break;
++ break;
++ case CMSG_NETIF_BE_CREDITLIMIT:
++ if ( msg->length != sizeof(netif_be_creditlimit_t) )
++ goto parse_error;
++ netif_creditlimit((netif_be_creditlimit_t *)&msg->msg[0]);
++ break;
+ case CMSG_NETIF_BE_CONNECT:
+ if ( msg->length != sizeof(netif_be_connect_t) )
+ goto parse_error;
+ netif_connect((netif_be_connect_t *)&msg->msg[0]);
++ break;
+ case CMSG_NETIF_BE_DISCONNECT:
+ if ( msg->length != sizeof(netif_be_disconnect_t) )
+ goto parse_error;
+ if ( !netif_disconnect((netif_be_disconnect_t *)&msg->msg[0],msg->id) )
+ return; /* Sending the response is deferred until later. */
+ break;
+ default:
+ goto parse_error;
+ }
+
+ ctrl_if_send_response(msg);
+ return;
+
+ parse_error:
+ DPRINTK("Parse error while reading message subtype %d, len %d\n",
+ msg->subtype, msg->length);
+ msg->length = 0;
+ ctrl_if_send_response(msg);
+ }
+
+ void netif_ctrlif_init(void)
+ {
+ ctrl_msg_t cmsg;
+ netif_be_driver_status_t st;
+
+ (void)ctrl_if_register_receiver(CMSG_NETIF_BE, netif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+
+ /* Send a driver-UP notification to the domain controller. */
+ cmsg.type = CMSG_NETIF_BE;
+ cmsg.subtype = CMSG_NETIF_BE_DRIVER_STATUS;
+ cmsg.length = sizeof(netif_be_driver_status_t);
+ st.status = NETIF_DRIVER_STATUS_UP;
+ memcpy(cmsg.msg, &st, sizeof(st));
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ }
--- /dev/null
- * Copyright (c) 2004, Keir Fraser
+ /******************************************************************************
+ * arch/xen/drivers/netif/backend/interface.c
+ *
+ * Network-device interface management.
+ *
- /*init_ac_timer(&new_vif->credit_timeout);*/
++ * Copyright (c) 2004-2005, Keir Fraser
+ */
+
+ #include "common.h"
+ #include <linux/rtnetlink.h>
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ #define VMALLOC_VMADDR(x) ((unsigned long)(x))
+ #endif
+
+ #define NETIF_HASHSZ 1024
+ #define NETIF_HASH(_d,_h) (((int)(_d)^(int)(_h))&(NETIF_HASHSZ-1))
+
+ static netif_t *netif_hash[NETIF_HASHSZ];
+
+ netif_t *netif_find_by_handle(domid_t domid, unsigned int handle)
+ {
+ netif_t *netif = netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif != NULL) &&
+ ((netif->domid != domid) || (netif->handle != handle)) )
+ netif = netif->hash_next;
+ return netif;
+ }
+
+ static void __netif_up(netif_t *netif)
+ {
+ struct net_device *dev = netif->dev;
+ spin_lock_bh(&dev->xmit_lock);
+ netif->active = 1;
+ spin_unlock_bh(&dev->xmit_lock);
+ (void)request_irq(netif->irq, netif_be_int, 0, dev->name, netif);
+ netif_schedule_work(netif);
+ }
+
+ static void __netif_down(netif_t *netif)
+ {
+ struct net_device *dev = netif->dev;
+ spin_lock_bh(&dev->xmit_lock);
+ netif->active = 0;
+ spin_unlock_bh(&dev->xmit_lock);
+ free_irq(netif->irq, netif);
+ netif_deschedule_work(netif);
+ }
+
+ static int net_open(struct net_device *dev)
+ {
+ netif_t *netif = netdev_priv(dev);
+ if ( netif->status == CONNECTED )
+ __netif_up(netif);
+ netif_start_queue(dev);
+ return 0;
+ }
+
+ static int net_close(struct net_device *dev)
+ {
+ netif_t *netif = netdev_priv(dev);
+ netif_stop_queue(dev);
+ if ( netif->status == CONNECTED )
+ __netif_down(netif);
+ return 0;
+ }
+
+ static void __netif_disconnect_complete(void *arg)
+ {
+ netif_t *netif = (netif_t *)arg;
+ ctrl_msg_t cmsg;
+ netif_be_disconnect_t disc;
+
+ /*
+ * These can't be done in netif_disconnect() because at that point there
+ * may be outstanding requests in the network stack whose asynchronous
+ * responses must still be notified to the remote driver.
+ */
+ unbind_evtchn_from_irq(netif->evtchn);
+ vfree(netif->tx); /* Frees netif->rx as well. */
+
+ /* Construct the deferred response message. */
+ cmsg.type = CMSG_NETIF_BE;
+ cmsg.subtype = CMSG_NETIF_BE_DISCONNECT;
+ cmsg.id = netif->disconnect_rspid;
+ cmsg.length = sizeof(netif_be_disconnect_t);
+ disc.domid = netif->domid;
+ disc.netif_handle = netif->handle;
+ disc.status = NETIF_BE_STATUS_OKAY;
+ memcpy(cmsg.msg, &disc, sizeof(disc));
+
+ /*
+ * Make sure message is constructed /before/ status change, because
+ * after the status change the 'netif' structure could be deallocated at
+ * any time. Also make sure we send the response /after/ status change,
+ * as otherwise a subsequent CONNECT request could spuriously fail if
+ * another CPU doesn't see the status change yet.
+ */
+ mb();
+ if ( netif->status != DISCONNECTING )
+ BUG();
+ netif->status = DISCONNECTED;
+ mb();
+
+ /* Send the successful response. */
+ ctrl_if_send_response(&cmsg);
+ }
+
+ void netif_disconnect_complete(netif_t *netif)
+ {
+ INIT_WORK(&netif->work, __netif_disconnect_complete, (void *)netif);
+ schedule_work(&netif->work);
+ }
+
+ void netif_create(netif_be_create_t *create)
+ {
+ int err = 0;
+ domid_t domid = create->domid;
+ unsigned int handle = create->netif_handle;
+ struct net_device *dev;
+ netif_t **pnetif, *netif;
+ char name[IFNAMSIZ];
+
+ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
+ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
+ if ( dev == NULL )
+ {
+ DPRINTK("Could not create netif: out of memory\n");
+ create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ netif = netdev_priv(dev);
+ memset(netif, 0, sizeof(*netif));
+ netif->domid = domid;
+ netif->handle = handle;
+ netif->status = DISCONNECTED;
+ atomic_set(&netif->refcnt, 0);
+ netif->dev = dev;
+
+ netif->credit_bytes = netif->remaining_credit = ~0UL;
+ netif->credit_usec = 0UL;
- /*
- * Initialise a dummy MAC address. We choose the numerically largest
- * non-broadcast address to prevent the address getting stolen by an
- * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
- */
- memset(dev->dev_addr, 0xFF, ETH_ALEN);
- dev->dev_addr[0] &= ~0x01;
++ init_timer(&netif->credit_timeout);
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( *pnetif != NULL )
+ {
+ if ( ((*pnetif)->domid == domid) && ((*pnetif)->handle == handle) )
+ {
+ DPRINTK("Could not create netif: already exists\n");
+ create->status = NETIF_BE_STATUS_INTERFACE_EXISTS;
+ free_netdev(dev);
+ return;
+ }
+ pnetif = &(*pnetif)->hash_next;
+ }
+
+ dev->hard_start_xmit = netif_be_start_xmit;
+ dev->get_stats = netif_be_get_stats;
+ dev->open = net_open;
+ dev->stop = net_close;
+
+ /* Disable queuing. */
+ dev->tx_queue_len = 0;
+
-#if 0
- struct net_device *eth0_dev;
-#endif
++ if ( (create->be_mac[0] == 0) && (create->be_mac[1] == 0) &&
++ (create->be_mac[2] == 0) && (create->be_mac[3] == 0) &&
++ (create->be_mac[4] == 0) && (create->be_mac[5] == 0) )
++ {
++ /*
++ * Initialise a dummy MAC address. We choose the numerically largest
++ * non-broadcast address to prevent the address getting stolen by an
++ * Ethernet bridge for STP purposes. (FE:FF:FF:FF:FF:FF)
++ */
++ memset(dev->dev_addr, 0xFF, ETH_ALEN);
++ dev->dev_addr[0] &= ~0x01;
++ }
++ else
++ {
++ memcpy(dev->dev_addr, create->be_mac, ETH_ALEN);
++ }
++
++ memcpy(netif->fe_dev_addr, create->mac, ETH_ALEN);
+
+ rtnl_lock();
+ err = register_netdevice(dev);
+ rtnl_unlock();
+
+ if ( err != 0 )
+ {
+ DPRINTK("Could not register new net device %s: err=%d\n",
+ dev->name, err);
+ create->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ free_netdev(dev);
+ return;
+ }
+
+ netif->hash_next = *pnetif;
+ *pnetif = netif;
+
+ DPRINTK("Successfully created netif\n");
+ create->status = NETIF_BE_STATUS_OKAY;
+ }
+
+ void netif_destroy(netif_be_destroy_t *destroy)
+ {
+ domid_t domid = destroy->domid;
+ unsigned int handle = destroy->netif_handle;
+ netif_t **pnetif, *netif;
+
+ pnetif = &netif_hash[NETIF_HASH(domid, handle)];
+ while ( (netif = *pnetif) != NULL )
+ {
+ if ( (netif->domid == domid) && (netif->handle == handle) )
+ {
+ if ( netif->status != DISCONNECTED )
+ goto still_connected;
+ goto destroy;
+ }
+ pnetif = &netif->hash_next;
+ }
+
+ destroy->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+
+ still_connected:
+ destroy->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+
+ destroy:
+ *pnetif = netif->hash_next;
+ unregister_netdev(netif->dev);
+ free_netdev(netif->dev);
+ destroy->status = NETIF_BE_STATUS_OKAY;
+ }
+
++void netif_creditlimit(netif_be_creditlimit_t *creditlimit)
++{
++ domid_t domid = creditlimit->domid;
++ unsigned int handle = creditlimit->netif_handle;
++ netif_t *netif;
++
++ netif = netif_find_by_handle(domid, handle);
++ if ( unlikely(netif == NULL) )
++ {
++ DPRINTK("netif_creditlimit attempted for non-existent netif"
++ " (%u,%u)\n", creditlimit->domid, creditlimit->netif_handle);
++ creditlimit->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
++ return;
++ }
++
++ /* Set the credit limit (reset remaining credit to new limit). */
++ netif->credit_bytes = netif->remaining_credit = creditlimit->credit_bytes;
++ netif->credit_usec = creditlimit->period_usec;
++
++ if ( netif->status == CONNECTED )
++ {
++ /*
++ * Schedule work so that any packets waiting under previous credit
++ * limit are dealt with (acts like a replenishment point).
++ */
++ netif->credit_timeout.expires = jiffies;
++ netif_schedule_work(netif);
++ }
++
++ creditlimit->status = NETIF_BE_STATUS_OKAY;
++}
++
+ void netif_connect(netif_be_connect_t *connect)
+ {
+ domid_t domid = connect->domid;
+ unsigned int handle = connect->netif_handle;
+ unsigned int evtchn = connect->evtchn;
+ unsigned long tx_shmem_frame = connect->tx_shmem_frame;
+ unsigned long rx_shmem_frame = connect->rx_shmem_frame;
+ struct vm_struct *vma;
+ pgprot_t prot;
+ int error;
+ netif_t *netif;
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_connect attempted for non-existent netif (%u,%u)\n",
+ connect->domid, connect->netif_handle);
+ connect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return;
+ }
+
+ if ( netif->status != DISCONNECTED )
+ {
+ connect->status = NETIF_BE_STATUS_INTERFACE_CONNECTED;
+ return;
+ }
+
+ if ( (vma = get_vm_area(2*PAGE_SIZE, VM_IOREMAP)) == NULL )
+ {
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ return;
+ }
+
+ prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED);
+ error = direct_remap_area_pages(&init_mm,
+ VMALLOC_VMADDR(vma->addr),
+ tx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ error |= direct_remap_area_pages(&init_mm,
+ VMALLOC_VMADDR(vma->addr) + PAGE_SIZE,
+ rx_shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+ prot, domid);
+ if ( error != 0 )
+ {
+ if ( error == -ENOMEM )
+ connect->status = NETIF_BE_STATUS_OUT_OF_MEMORY;
+ else if ( error == -EFAULT )
+ connect->status = NETIF_BE_STATUS_MAPPING_ERROR;
+ else
+ connect->status = NETIF_BE_STATUS_ERROR;
+ vfree(vma->addr);
+ return;
+ }
+
+ netif->evtchn = evtchn;
+ netif->irq = bind_evtchn_to_irq(evtchn);
+ netif->tx_shmem_frame = tx_shmem_frame;
+ netif->rx_shmem_frame = rx_shmem_frame;
+ netif->tx =
+ (netif_tx_interface_t *)vma->addr;
+ netif->rx =
+ (netif_rx_interface_t *)((char *)vma->addr + PAGE_SIZE);
+ netif->tx->resp_prod = netif->rx->resp_prod = 0;
+ netif_get(netif);
+ wmb(); /* Other CPUs see new state before interface is started. */
+
+ rtnl_lock();
+ netif->status = CONNECTED;
+ wmb();
+ if ( netif_running(netif->dev) )
+ __netif_up(netif);
+ rtnl_unlock();
+
+ connect->status = NETIF_BE_STATUS_OKAY;
+ }
+
+ int netif_disconnect(netif_be_disconnect_t *disconnect, u8 rsp_id)
+ {
+ domid_t domid = disconnect->domid;
+ unsigned int handle = disconnect->netif_handle;
+ netif_t *netif;
+
+ netif = netif_find_by_handle(domid, handle);
+ if ( unlikely(netif == NULL) )
+ {
+ DPRINTK("netif_disconnect attempted for non-existent netif"
+ " (%u,%u)\n", disconnect->domid, disconnect->netif_handle);
+ disconnect->status = NETIF_BE_STATUS_INTERFACE_NOT_FOUND;
+ return 1; /* Caller will send response error message. */
+ }
+
+ if ( netif->status == CONNECTED )
+ {
+ rtnl_lock();
+ netif->status = DISCONNECTING;
+ netif->disconnect_rspid = rsp_id;
+ wmb();
+ if ( netif_running(netif->dev) )
+ __netif_down(netif);
+ rtnl_unlock();
+ netif_put(netif);
+ return 0; /* Caller should not send response message. */
+ }
+
+ disconnect->status = NETIF_BE_STATUS_OKAY;
+ return 1;
+ }
+
+ void netif_interface_init(void)
+ {
+ memset(netif_hash, 0, sizeof(netif_hash));
+ }
--- /dev/null
- * Copyright (c) 2002-2004, K A Fraser
+ /******************************************************************************
+ * drivers/xen/netback/netback.c
+ *
+ * Back-end of the driver for virtual network devices. This portion of the
+ * driver exports a 'unified' network-device interface that can be accessed
+ * by any operating system that implements a compatible front end. A
+ * reference front-end implementation can be found in:
+ * drivers/xen/netfront/netfront.c
+ *
- mcl[0].args[0] = vdata >> PAGE_SHIFT;
++ * Copyright (c) 2002-2005, K A Fraser
+ */
+
+ #include "common.h"
+ #include <asm-xen/balloon.h>
++#include <asm-xen/evtchn.h>
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++#include <linux/delay.h>
++#endif
+
+ static void netif_idx_release(u16 pending_idx);
+ static void netif_page_release(struct page *page);
+ static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st);
+ static int make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ memory_t addr,
+ u16 size);
+
+ static void net_tx_action(unsigned long unused);
+ static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
+
+ static void net_rx_action(unsigned long unused);
+ static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
+
+ static struct timer_list net_timer;
+
+ static struct sk_buff_head rx_queue;
+ static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2];
+ static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE*3];
+ static unsigned char rx_notify[NR_EVENT_CHANNELS];
+
+ /* Don't currently gate addition of an interface to the tx scheduling list. */
+ #define tx_work_exists(_if) (1)
+
+ #define MAX_PENDING_REQS 256
+ static unsigned long mmap_vstart;
+ #define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))
+
+ #define PKT_PROT_LEN 64
+
+ static struct {
+ netif_tx_request_t req;
+ netif_t *netif;
+ } pending_tx_info[MAX_PENDING_REQS];
+ static u16 pending_ring[MAX_PENDING_REQS];
+ typedef unsigned int PEND_RING_IDX;
+ #define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
+ static PEND_RING_IDX pending_prod, pending_cons;
+ #define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
+
+ /* Freed TX SKBs get batched on this ring before return to pending_ring. */
+ static u16 dealloc_ring[MAX_PENDING_REQS];
+ static PEND_RING_IDX dealloc_prod, dealloc_cons;
+
+ static struct sk_buff_head tx_queue;
+ static multicall_entry_t tx_mcl[MAX_PENDING_REQS];
+
+ static struct list_head net_schedule_list;
+ static spinlock_t net_schedule_list_lock;
+
+ #define MAX_MFN_ALLOC 64
+ static unsigned long mfn_list[MAX_MFN_ALLOC];
+ static unsigned int alloc_index = 0;
+ static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
+
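+ /* Take one machine frame from the local cache, refilling from Xen when empty. */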
+ static unsigned long alloc_mfn(void)
+ {
+ unsigned long mfn = 0, flags;
+ spin_lock_irqsave(&mfn_lock, flags);
+ if ( unlikely(alloc_index == 0) )
+ alloc_index = HYPERVISOR_dom_mem_op(
+ MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
+ if ( alloc_index != 0 )
+ mfn = mfn_list[--alloc_index];
+ spin_unlock_irqrestore(&mfn_lock, flags);
+ return mfn;
+ }
+
+ static void free_mfn(unsigned long mfn)
+ {
+ unsigned long flags;
+ spin_lock_irqsave(&mfn_lock, flags);
+ if ( alloc_index != MAX_MFN_ALLOC )
+ mfn_list[alloc_index++] = mfn;
+ else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
+ &mfn, 1, 0) != 1 )
+ BUG();
+ spin_unlock_irqrestore(&mfn_lock, flags);
+ }
+
+ static inline void maybe_schedule_tx_action(void)
+ {
+ smp_mb();
+ if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
+ !list_empty(&net_schedule_list) )
+ tasklet_schedule(&net_tx_tasklet);
+ }
+
+ /*
+ * A gross way of confirming the origin of an skb data page. The slab
+ * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
+ */
+ static inline int is_xen_skb(struct sk_buff *skb)
+ {
+ extern kmem_cache_t *skbuff_cachep;
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
+ #else
+ kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
+ #endif
+ return (cp == skbuff_cachep);
+ }
+
+ int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ netif_t *netif = netdev_priv(dev);
+
+ ASSERT(skb->dev == dev);
+
+ /* Drop the packet if the target domain has no receive buffers. */
+ if ( !netif->active ||
+ (netif->rx_req_cons == netif->rx->req_prod) ||
+ ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
+ goto drop;
+
+ /*
+ * We do not copy the packet unless:
+ * 1. The data is shared; or
+ * 2. The data is not allocated from our special cache.
+ * NB. We also couldn't cope with fragmented packets, but we won't get
+ * any because we do not advertise the NETIF_F_SG feature.
+ */
+ if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
+ {
+ int hlen = skb->data - skb->head;
+ struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
+ if ( unlikely(nskb == NULL) )
+ goto drop;
+ skb_reserve(nskb, hlen);
+ __skb_put(nskb, skb->len);
+ (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
+ nskb->dev = skb->dev;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+
+ netif->rx_req_cons++;
+ netif_get(netif);
+
+ skb_queue_tail(&rx_queue, skb);
+ tasklet_schedule(&net_rx_tasklet);
+
+ return 0;
+
+ drop:
+ netif->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ #if 0
+ static void xen_network_done_notify(void)
+ {
+ static struct net_device *eth0_dev = NULL;
+ if ( unlikely(eth0_dev == NULL) )
+ eth0_dev = __dev_get_by_name("eth0");
+ netif_rx_schedule(eth0_dev);
+ }
+ /*
+ * Add the following to the poll() function in a NAPI driver (Tigon3 is an example):
+ * if ( xen_network_done() )
+ * tg3_enable_ints(tp);
+ */
+ int xen_network_done(void)
+ {
+ return skb_queue_empty(&rx_queue);
+ }
+ #endif
+
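+ /*
+ * Transfer queued skbs to their target domains: allocate a replacement
+ * frame for each packet, reassign the old data page to the guest, and post
+ * an rx response (notifying each event channel at most once per batch).
+ */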
+ static void net_rx_action(unsigned long unused)
+ {
+ netif_t *netif;
+ s8 status;
+ u16 size, id, evtchn;
+ mmu_update_t *mmu;
+ multicall_entry_t *mcl;
+ unsigned long vdata, mdata, new_mfn;
+ struct sk_buff_head rxq;
+ struct sk_buff *skb;
+ u16 notify_list[NETIF_RX_RING_SIZE];
+ int notify_nr = 0;
+
+ skb_queue_head_init(&rxq);
+
+ mcl = rx_mcl;
+ mmu = rx_mmu;
+ while ( (skb = skb_dequeue(&rx_queue)) != NULL )
+ {
+ netif = netdev_priv(skb->dev);
+ vdata = (unsigned long)skb->data;
+ mdata = virt_to_machine(vdata);
+
+ /* Memory squeeze? Back off for an arbitrary while. */
+ if ( (new_mfn = alloc_mfn()) == 0 )
+ {
+ if ( net_ratelimit() )
+ printk(KERN_WARNING "Memory squeeze in netback driver.\n");
+ mod_timer(&net_timer, jiffies + HZ);
+ skb_queue_head(&rx_queue, skb);
+ break;
+ }
+
+ /*
+ * Set the new P2M table entry before reassigning the old data page.
+ * Heed the comment in pgtable-2level.h:pte_page(). :-)
+ */
+ phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+
+ mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+ mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
+ mmu[1].ptr = MMU_EXTENDED_COMMAND;
+ mmu[1].val = MMUEXT_SET_FOREIGNDOM;
+ mmu[1].val |= (unsigned long)netif->domid << 16;
+ mmu[2].ptr = (mdata & PAGE_MASK) | MMU_EXTENDED_COMMAND;
+ mmu[2].val = MMUEXT_REASSIGN_PAGE;
+
+ mcl[0].op = __HYPERVISOR_update_va_mapping;
-#if 0
++ mcl[0].args[0] = vdata;
+ mcl[0].args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;
+ mcl[0].args[2] = 0;
+ mcl[1].op = __HYPERVISOR_mmu_update;
+ mcl[1].args[0] = (unsigned long)mmu;
+ mcl[1].args[1] = 3;
+ mcl[1].args[2] = 0;
+
+ mcl += 2;
+ mmu += 3;
+
+ __skb_queue_tail(&rxq, skb);
+
+ /* Filled the batch queue? */
+ if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
+ break;
+ }
+
+ if ( mcl == rx_mcl )
+ return;
+
+ mcl[-2].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
+ BUG();
+
+ mcl = rx_mcl;
+ mmu = rx_mmu;
+ while ( (skb = __skb_dequeue(&rxq)) != NULL )
+ {
+ netif = netdev_priv(skb->dev);
+ size = skb->tail - skb->data;
+
+ /* Rederive the machine addresses. */
+ new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
+ mdata = ((mmu[2].ptr & PAGE_MASK) |
+ ((unsigned long)skb->data & ~PAGE_MASK));
+
+ atomic_set(&(skb_shinfo(skb)->dataref), 1);
+ skb_shinfo(skb)->nr_frags = 0;
+ skb_shinfo(skb)->frag_list = NULL;
+
+ netif->stats.tx_bytes += size;
+ netif->stats.tx_packets++;
+
+ /* The update_va_mapping() must not fail. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ BUG();
+
+ /* Check the reassignment error code. */
+ status = NETIF_RSP_OKAY;
+ if ( unlikely(mcl[1].args[5] != 0) )
+ {
+ DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
+ free_mfn(mdata >> PAGE_SHIFT);
+ status = NETIF_RSP_ERROR;
+ }
+
+ evtchn = netif->evtchn;
+ id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
+ if ( make_rx_response(netif, id, status, mdata, size) &&
+ (rx_notify[evtchn] == 0) )
+ {
+ rx_notify[evtchn] = 1;
+ notify_list[notify_nr++] = evtchn;
+ }
+
+ netif_put(netif);
+ dev_kfree_skb(skb);
+
+ mcl += 2;
+ mmu += 3;
+ }
+
+ while ( notify_nr != 0 )
+ {
+ evtchn = notify_list[--notify_nr];
+ rx_notify[evtchn] = 0;
+ notify_via_evtchn(evtchn);
+ }
+
+ /* More work to do? */
+ if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
+ tasklet_schedule(&net_rx_tasklet);
+ #if 0
+ else
+ xen_network_done_notify();
+ #endif
+ }
+
+ static void net_alarm(unsigned long unused)
+ {
+ tasklet_schedule(&net_rx_tasklet);
+ }
+
+ struct net_device_stats *netif_be_get_stats(struct net_device *dev)
+ {
+ netif_t *netif = netdev_priv(dev);
+ return &netif->stats;
+ }
+
+ static int __on_net_schedule_list(netif_t *netif)
+ {
+ return netif->list.next != NULL;
+ }
+
+ static void remove_from_net_schedule_list(netif_t *netif)
+ {
+ spin_lock_irq(&net_schedule_list_lock);
+ if ( likely(__on_net_schedule_list(netif)) )
+ {
+ list_del(&netif->list);
+ netif->list.next = NULL;
+ netif_put(netif);
+ }
+ spin_unlock_irq(&net_schedule_list_lock);
+ }
+
+ static void add_to_net_schedule_list_tail(netif_t *netif)
+ {
+ if ( __on_net_schedule_list(netif) )
+ return;
+
+ spin_lock_irq(&net_schedule_list_lock);
+ if ( !__on_net_schedule_list(netif) && netif->active )
+ {
+ list_add_tail(&netif->list, &net_schedule_list);
+ netif_get(netif);
+ }
+ spin_unlock_irq(&net_schedule_list_lock);
+ }
+
+ void netif_schedule_work(netif_t *netif)
+ {
+ if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+ ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+ }
+
+ void netif_deschedule_work(netif_t *netif)
+ {
+ remove_from_net_schedule_list(netif);
+ }
+
-#endif
++
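+ /* Credit timer callback: replenish the interface's transmit credit and reschedule it. */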
+ static void tx_credit_callback(unsigned long data)
+ {
+ netif_t *netif = (netif_t *)data;
+ netif->remaining_credit = netif->credit_bytes;
+ netif_schedule_work(netif);
+ }
- mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
+
+ static void net_tx_action(unsigned long unused)
+ {
+ struct list_head *ent;
+ struct sk_buff *skb;
+ netif_t *netif;
+ netif_tx_request_t txreq;
+ u16 pending_idx;
+ NETIF_RING_IDX i;
+ multicall_entry_t *mcl;
+ PEND_RING_IDX dc, dp;
+ unsigned int data_len;
+
+ if ( (dc = dealloc_cons) == (dp = dealloc_prod) )
+ goto skip_dealloc;
+
+ mcl = tx_mcl;
+ while ( dc != dp )
+ {
+ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
+ mcl[0].op = __HYPERVISOR_update_va_mapping;
- netif->tx->req_cons = ++netif->tx_req_cons;
-
- /*
- * 1. Ensure that we see the request when we copy it.
- * 2. Ensure that frontend sees updated req_cons before we check
- * for more work to schedule.
- */
- mb();
-
++ mcl[0].args[0] = MMAP_VADDR(pending_idx);
+ mcl[0].args[1] = 0;
+ mcl[0].args[2] = 0;
+ mcl++;
+ }
+
+ mcl[-1].args[2] = UVMF_FLUSH_TLB;
+ if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+ BUG();
+
+ mcl = tx_mcl;
+ while ( dealloc_cons != dp )
+ {
+ /* The update_va_mapping() must not fail. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ BUG();
+
+ pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];
+
+ netif = pending_tx_info[pending_idx].netif;
+
+ make_tx_response(netif, pending_tx_info[pending_idx].req.id,
+ NETIF_RSP_OKAY);
+
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+
+ /*
+ * Scheduling checks must happen after the above response is posted.
+ * This avoids a possible race with a guest OS on another CPU if that
+ * guest is testing against 'resp_prod' when deciding whether to notify
+ * us when it queues additional packets.
+ */
+ mb();
+ if ( (netif->tx_req_cons != netif->tx->req_prod) &&
+ ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
+ add_to_net_schedule_list_tail(netif);
+
+ netif_put(netif);
+
+ mcl++;
+ }
+
+ skip_dealloc:
+ mcl = tx_mcl;
+ while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
+ !list_empty(&net_schedule_list) )
+ {
+ /* Get a netif from the list with work to do. */
+ ent = net_schedule_list.next;
+ netif = list_entry(ent, netif_t, list);
+ netif_get(netif);
+ remove_from_net_schedule_list(netif);
+
+ /* Work to do? */
+ i = netif->tx_req_cons;
+ if ( (i == netif->tx->req_prod) ||
+ ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )
+ {
+ netif_put(netif);
+ continue;
+ }
+
-#if 0
++ rmb(); /* Ensure that we see the request before we copy it. */
+ memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,
+ sizeof(txreq));
+
- if ( tx.size > netif->remaining_credit )
+ /* Credit-based scheduling. */
- s_time_t now = NOW(), next_credit =
- netif->credit_timeout.expires + MICROSECS(netif->credit_usec);
- if ( next_credit <= now )
++ if ( txreq.size > netif->remaining_credit )
+ {
- else
++ unsigned long now = jiffies;
++ unsigned long next_credit =
++ netif->credit_timeout.expires +
++ msecs_to_jiffies(netif->credit_usec / 1000);
++
++ /* Timer could already be pending in some rare cases. */
++ if ( timer_pending(&netif->credit_timeout) )
++ break;
++
++ /* Already passed the point at which we can replenish credit? */
++ if ( time_after_eq(now, next_credit) )
+ {
+ netif->credit_timeout.expires = now;
+ netif->remaining_credit = netif->credit_bytes;
+ }
- netif->credit_timeout.cpu = smp_processor_id();
- add_ac_timer(&netif->credit_timeout);
++
++ /* Still too big to send right now? Then set a timer callback. */
++ if ( txreq.size > netif->remaining_credit )
+ {
+ netif->remaining_credit = 0;
+ netif->credit_timeout.expires = next_credit;
+ netif->credit_timeout.data = (unsigned long)netif;
+ netif->credit_timeout.function = tx_credit_callback;
- netif->remaining_credit -= tx.size;
-#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
++ add_timer_on(&netif->credit_timeout, smp_processor_id());
++#else
++ add_timer(&netif->credit_timeout);
++#endif
+ break;
+ }
+ }
- mcl[0].args[0] = MMAP_VADDR(pending_idx) >> PAGE_SHIFT;
++ netif->remaining_credit -= txreq.size;
++
++ /*
++ * Why the barrier? It ensures that the frontend sees updated req_cons
++ * before we check for more work to schedule.
++ */
++ netif->tx->req_cons = ++netif->tx_req_cons;
++ mb();
+
+ netif_schedule_work(netif);
+
+ if ( unlikely(txreq.size < ETH_HLEN) ||
+ unlikely(txreq.size > ETH_FRAME_LEN) )
+ {
+ DPRINTK("Bad packet size: %d\n", txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ /* No crossing a page boundary as the payload mustn't fragment. */
+ if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
+ {
+ DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
+ txreq.addr, txreq.size,
+ (txreq.addr &~PAGE_MASK) + txreq.size);
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ continue;
+ }
+
+ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
+
+ data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
+
+ if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
+ {
+ DPRINTK("Can't allocate a skb in start_xmit.\n");
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ break;
+ }
+
+ /* Packets passed to netif_rx() must have some headroom. */
+ skb_reserve(skb, 16);
+
+ mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
++ mcl[0].args[0] = MMAP_VADDR(pending_idx);
+ mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
+ mcl[0].args[2] = 0;
+ mcl[0].args[3] = netif->domid;
+ mcl++;
+
+ memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
+ pending_tx_info[pending_idx].netif = netif;
+ *((u16 *)skb->data) = pending_idx;
+
+ __skb_queue_tail(&tx_queue, skb);
+
+ pending_cons++;
+
+ /* Filled the batch queue? */
+ if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )
+ break;
+ }
+
+ if ( mcl == tx_mcl )
+ return;
+
+ if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )
+ BUG();
+
+ mcl = tx_mcl;
+ while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
+ {
+ pending_idx = *((u16 *)skb->data);
+ netif = pending_tx_info[pending_idx].netif;
+ memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));
+
+ /* Check the remap error code. */
+ if ( unlikely(mcl[0].args[5] != 0) )
+ {
+ DPRINTK("Bad page frame\n");
+ make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
+ netif_put(netif);
+ kfree_skb(skb);
+ mcl++;
+ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
+ continue;
+ }
+
+ phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
+ FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
+
+ data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;
+
+ __skb_put(skb, data_len);
+ memcpy(skb->data,
+ (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
+ data_len);
+
+ if ( data_len < txreq.size )
+ {
+ /* Append the packet payload as a fragment. */
+ skb_shinfo(skb)->frags[0].page =
+ virt_to_page(MMAP_VADDR(pending_idx));
+ skb_shinfo(skb)->frags[0].size = txreq.size - data_len;
+ skb_shinfo(skb)->frags[0].page_offset =
+ (txreq.addr + data_len) & ~PAGE_MASK;
+ skb_shinfo(skb)->nr_frags = 1;
+ }
+ else
+ {
+ /* Schedule a response immediately. */
+ netif_idx_release(pending_idx);
+ }
+
+ skb->data_len = txreq.size - data_len;
+ skb->len += skb->data_len;
+
+ skb->dev = netif->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ netif->stats.rx_bytes += txreq.size;
+ netif->stats.rx_packets++;
+
+ netif_rx(skb);
+ netif->dev->last_rx = jiffies;
+
+ mcl++;
+ }
+ }
+
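+ /* Queue a completed pending-ring index for deallocation and kick the TX tasklet. */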
+ static void netif_idx_release(u16 pending_idx)
+ {
+ static spinlock_t _lock = SPIN_LOCK_UNLOCKED;
+ unsigned long flags;
+
+ spin_lock_irqsave(&_lock, flags);
+ dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
+ spin_unlock_irqrestore(&_lock, flags);
+
+ tasklet_schedule(&net_tx_tasklet);
+ }
+
+ static void netif_page_release(struct page *page)
+ {
+ u16 pending_idx = page - virt_to_page(mmap_vstart);
+
+ /* Ready for next use. */
+ set_page_count(page, 1);
+
+ netif_idx_release(pending_idx);
+ }
+
+ irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ netif_t *netif = dev_id;
+ if ( tx_work_exists(netif) )
+ {
+ add_to_net_schedule_list_tail(netif);
+ maybe_schedule_tx_action();
+ }
+ return IRQ_HANDLED;
+ }
+
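+ /*
+ * Post a transmit response on the shared ring and notify the frontend if
+ * it requested an event at this index.
+ */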
+ static void make_tx_response(netif_t *netif,
+ u16 id,
+ s8 st)
+ {
+ NETIF_RING_IDX i = netif->tx_resp_prod;
+ netif_tx_response_t *resp;
+
+ resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;
+ resp->id = id;
+ resp->status = st;
+ wmb();
+ netif->tx->resp_prod = netif->tx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ if ( i == netif->tx->event )
+ notify_via_evtchn(netif->evtchn);
+ }
+
+ static int make_rx_response(netif_t *netif,
+ u16 id,
+ s8 st,
+ memory_t addr,
+ u16 size)
+ {
+ NETIF_RING_IDX i = netif->rx_resp_prod;
+ netif_rx_response_t *resp;
+
+ resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+ resp->addr = addr;
+ resp->id = id;
+ resp->status = (s16)size;
+ if ( st < 0 )
+ resp->status = (s16)st;
+ wmb();
+ netif->rx->resp_prod = netif->rx_resp_prod = ++i;
+
+ mb(); /* Update producer before checking event threshold. */
+ return (i == netif->rx->event);
+ }
+
+ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
+ {
+ struct list_head *ent;
+ netif_t *netif;
+ int i = 0;
+
+ printk(KERN_ALERT "netif_schedule_list:\n");
+ spin_lock_irq(&net_schedule_list_lock);
+
+ list_for_each ( ent, &net_schedule_list )
+ {
+ netif = list_entry(ent, netif_t, list);
+ printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
+ i, netif->rx_req_cons, netif->rx_resp_prod);
+ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
+ netif->tx_req_cons, netif->tx_resp_prod);
+ printk(KERN_ALERT " shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
+ netif->rx->req_prod, netif->rx->resp_prod);
+ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
+ netif->rx->event, netif->tx->req_prod);
+ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
+ netif->tx->resp_prod, netif->tx->event);
+ i++;
+ }
+
+ spin_unlock_irq(&net_schedule_list_lock);
+ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
+
+ return IRQ_HANDLED;
+ }
+
+ static int __init netback_init(void)
+ {
+ int i;
+ struct page *page;
+
+ if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
+ !(xen_start_info.flags & SIF_INITDOMAIN) )
+ return 0;
+
+ printk("Initialising Xen netif backend\n");
+
+ /* We can increase reservation by this much in net_rx_action(). */
+ balloon_update_driver_allowance(NETIF_RX_RING_SIZE);
+
+ skb_queue_head_init(&rx_queue);
+ skb_queue_head_init(&tx_queue);
+
+ init_timer(&net_timer);
+ net_timer.data = 0;
+ net_timer.function = net_alarm;
+
+ netif_interface_init();
+
+ if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
+ BUG();
+
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ {
+ page = virt_to_page(MMAP_VADDR(i));
+ set_page_count(page, 1);
+ SetPageForeign(page, netif_page_release);
+ }
+
+ pending_cons = 0;
+ pending_prod = MAX_PENDING_REQS;
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ pending_ring[i] = i;
+
+ spin_lock_init(&net_schedule_list_lock);
+ INIT_LIST_HEAD(&net_schedule_list);
+
+ netif_ctrlif_init();
+
+ (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
+ netif_be_dbg, SA_SHIRQ,
+ "net-be-dbg", &netif_be_dbg);
+
+ return 0;
+ }
+
+ static void netback_cleanup(void)
+ {
+ BUG();
+ }
+
+ module_init(netback_init);
+ module_exit(netback_cleanup);
--- /dev/null
- rx_mcl[i].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
+ /******************************************************************************
+ * Virtual network driver for conversing with remote driver backends.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #include <linux/config.h>
+ #include <linux/module.h>
+ #include <linux/version.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+ #include <linux/netdevice.h>
+ #include <linux/inetdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
+ #include <linux/init.h>
+ #include <linux/bitops.h>
+ #include <net/sock.h>
+ #include <net/pkt_sched.h>
+ #include <net/arp.h>
+ #include <net/route.h>
+ #include <asm/io.h>
+ #include <asm-xen/evtchn.h>
+ #include <asm-xen/ctrl_if.h>
+ #include <asm-xen/xen-public/io/netif.h>
+ #include <asm-xen/balloon.h>
+ #include <asm/page.h>
+
+ #ifndef __GFP_NOWARN
+ #define __GFP_NOWARN 0
+ #endif
+ #define alloc_xen_skb(_l) __dev_alloc_skb((_l), GFP_ATOMIC|__GFP_NOWARN)
+
+ #define init_skb_shinfo(_skb) \
+ do { \
+ atomic_set(&(skb_shinfo(_skb)->dataref), 1); \
+ skb_shinfo(_skb)->nr_frags = 0; \
+ skb_shinfo(_skb)->frag_list = NULL; \
+ } while (0)
+
+ /* Allow headroom on each rx pkt for Ethernet header, alignment padding, ... */
+ #define RX_HEADROOM 200
+
+ /*
+ * If the backend driver is pipelining transmit requests then we can be very
+ * aggressive in avoiding new-packet notifications -- only need to send a
+ * notification if there are no outstanding unreceived responses.
+ * If the backend may be buffering our transmit buffers for any reason then we
+ * are rather more conservative.
+ */
+ #ifdef CONFIG_XEN_NETDEV_FRONTEND_PIPELINED_TRANSMITTER
+ #define TX_TEST_IDX resp_prod /* aggressive: any outstanding responses? */
+ #else
+ #define TX_TEST_IDX req_cons /* conservative: not seen all our requests? */
+ #endif
+
+ static void network_tx_buf_gc(struct net_device *dev);
+ static void network_alloc_rx_buffers(struct net_device *dev);
+
+ static unsigned long rx_pfn_array[NETIF_RX_RING_SIZE];
+ static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE+1];
+ static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
+
+ static struct list_head dev_list;
+
+ struct net_private
+ {
+ struct list_head list;
+ struct net_device *dev;
+
+ struct net_device_stats stats;
+ NETIF_RING_IDX rx_resp_cons, tx_resp_cons;
+ unsigned int tx_full;
+
+ netif_tx_interface_t *tx;
+ netif_rx_interface_t *rx;
+
+ spinlock_t tx_lock;
+ spinlock_t rx_lock;
+
+ unsigned int handle;
+ unsigned int evtchn;
+ unsigned int irq;
+
+ /* What is the status of our connection to the remote backend? */
+ #define BEST_CLOSED 0
+ #define BEST_DISCONNECTED 1
+ #define BEST_CONNECTED 2
+ unsigned int backend_state;
+
+ /* Is this interface open or closed (down or up)? */
+ #define UST_CLOSED 0
+ #define UST_OPEN 1
+ unsigned int user_state;
+
+ /* Receive-ring batched refills. */
+ #define RX_MIN_TARGET 8
+ #define RX_MAX_TARGET NETIF_RX_RING_SIZE
+ int rx_target;
+ struct sk_buff_head rx_batch;
+
+ /*
+ * {tx,rx}_skbs store outstanding skbuffs. The first entry in each
+ * array is an index into a chain of free entries.
+ */
+ struct sk_buff *tx_skbs[NETIF_TX_RING_SIZE+1];
+ struct sk_buff *rx_skbs[NETIF_RX_RING_SIZE+1];
+ };
+
+ /* Access macros for acquiring and freeing slots in {tx,rx}_skbs[]. */
+ #define ADD_ID_TO_FREELIST(_list, _id) \
+ (_list)[(_id)] = (_list)[0]; \
+ (_list)[0] = (void *)(unsigned long)(_id);
+ #define GET_ID_FROM_FREELIST(_list) \
+ ({ unsigned long _id = (unsigned long)(_list)[0]; \
+ (_list)[0] = (_list)[_id]; \
+ (unsigned short)_id; })
+
+ static char *status_name[] = {
+ [NETIF_INTERFACE_STATUS_CLOSED] = "closed",
+ [NETIF_INTERFACE_STATUS_DISCONNECTED] = "disconnected",
+ [NETIF_INTERFACE_STATUS_CONNECTED] = "connected",
+ [NETIF_INTERFACE_STATUS_CHANGED] = "changed",
+ };
+
+ static char *be_state_name[] = {
+ [BEST_CLOSED] = "closed",
+ [BEST_DISCONNECTED] = "disconnected",
+ [BEST_CONNECTED] = "connected",
+ };
+
+ #if DEBUG
+ #define DPRINTK(fmt, args...) \
+ printk(KERN_ALERT "xen_net (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
+ #else
+ #define DPRINTK(fmt, args...) ((void)0)
+ #endif
+ #define IPRINTK(fmt, args...) \
+ printk(KERN_INFO "xen_net: " fmt, ##args)
+ #define WPRINTK(fmt, args...) \
+ printk(KERN_WARNING "xen_net: " fmt, ##args)
+
+ static struct net_device *find_dev_by_handle(unsigned int handle)
+ {
+ struct list_head *ent;
+ struct net_private *np;
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ if (np->handle == handle)
+ return np->dev;
+ }
+ return NULL;
+ }
+
+ /** Network interface info. */
+ struct netif_ctrl {
+ /** Number of interfaces. */
+ int interface_n;
+ /** Number of connected interfaces. */
+ int connected_n;
+ /** Error code. */
+ int err;
+ int up;
+ };
+
+ static struct netif_ctrl netctrl;
+
+ static void netctrl_init(void)
+ {
+ memset(&netctrl, 0, sizeof(netctrl));
+ netctrl.up = NETIF_DRIVER_STATUS_DOWN;
+ }
+
+ /** Get or set a network interface error.
+ */
+ static int netctrl_err(int err)
+ {
+ if ((err < 0) && !netctrl.err)
+ netctrl.err = err;
+ return netctrl.err;
+ }
+
+ /** Test if all network interfaces are connected.
+ *
+ * @return 1 if all connected, 0 if not, negative error code otherwise
+ */
+ static int netctrl_connected(void)
+ {
+ int ok;
+
+ if (netctrl.err)
+ ok = netctrl.err;
+ else if (netctrl.up == NETIF_DRIVER_STATUS_UP)
+ ok = (netctrl.connected_n == netctrl.interface_n);
+ else
+ ok = 0;
+
+ return ok;
+ }
+
+ /** Count the connected network interfaces.
+ *
+ * @return connected count
+ */
+ static int netctrl_connected_count(void)
+ {
+
+ struct list_head *ent;
+ struct net_private *np;
+ unsigned int connected;
+
+ connected = 0;
+
+ list_for_each(ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ if (np->backend_state == BEST_CONNECTED)
+ connected++;
+ }
+
+ netctrl.connected_n = connected;
+ DPRINTK("> connected_n=%d interface_n=%d\n",
+ netctrl.connected_n, netctrl.interface_n);
+ return connected;
+ }
+
+ /** Send a packet on a net device to encourage switches to learn the
+ * MAC. We send a fake ARP request.
+ *
+ * @param dev device
+ * @return 0 on success, error code otherwise
+ */
+ static int send_fake_arp(struct net_device *dev)
+ {
+ struct sk_buff *skb;
+ u32 src_ip, dst_ip;
+
+ dst_ip = INADDR_BROADCAST;
+ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
+
+ /* No IP? Then nothing to do. */
+ if (src_ip == 0)
+ return 0;
+
+ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
+ dst_ip, dev, src_ip,
+ /*dst_hw*/ NULL, /*src_hw*/ NULL,
+ /*target_hw*/ dev->dev_addr);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ return dev_queue_xmit(skb);
+ }
+
+ static int network_open(struct net_device *dev)
+ {
+ struct net_private *np = netdev_priv(dev);
+
+ memset(&np->stats, 0, sizeof(np->stats));
+
+ np->user_state = UST_OPEN;
+
+ network_alloc_rx_buffers(dev);
+ np->rx->event = np->rx_resp_cons + 1;
+
+ netif_start_queue(dev);
+
+ return 0;
+ }
+
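+ /* Release skbs whose transmit requests the backend has now responded to. */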
+ static void network_tx_buf_gc(struct net_device *dev)
+ {
+ NETIF_RING_IDX i, prod;
+ unsigned short id;
+ struct net_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+
+ if (np->backend_state != BEST_CONNECTED)
+ return;
+
+ do {
+ prod = np->tx->resp_prod;
+ rmb(); /* Ensure we see responses up to 'prod'. */
+
+ for (i = np->tx_resp_cons; i != prod; i++) {
+ id = np->tx->ring[MASK_NETIF_TX_IDX(i)].resp.id;
+ skb = np->tx_skbs[id];
+ ADD_ID_TO_FREELIST(np->tx_skbs, id);
+ dev_kfree_skb_irq(skb);
+ }
+
+ np->tx_resp_cons = prod;
+
+ /*
+ * Set a new event, then check for race with update of tx_cons. Note
+ * that it is essential to schedule a callback, no matter how few
+ * buffers are pending. Even if there is space in the transmit ring,
+ * higher layers may be blocked because too much data is outstanding:
+ * in such cases notification from Xen is likely to be the only kick
+ * that we'll get.
+ */
+ np->tx->event =
+ prod + ((np->tx->req_prod - prod) >> 1) + 1;
+ mb();
+ } while (prod != np->tx->resp_prod);
+
+ if (np->tx_full && ((np->tx->req_prod - prod) < NETIF_TX_RING_SIZE)) {
+ np->tx_full = 0;
+ if (np->user_state == UST_OPEN)
+ netif_wake_queue(dev);
+ }
+ }
+
+
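+ /*
+ * Refill the receive ring: batch-allocate skbs, zap their page mappings,
+ * return the underlying pages to Xen, and advertise the new requests to
+ * the backend.
+ */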
+ static void network_alloc_rx_buffers(struct net_device *dev)
+ {
+ unsigned short id;
+ struct net_private *np = netdev_priv(dev);
+ struct sk_buff *skb;
+ int i, batch_target;
+ NETIF_RING_IDX req_prod = np->rx->req_prod;
+
+ if (unlikely(np->backend_state != BEST_CONNECTED))
+ return;
+
+ /*
+ * Allocate skbuffs greedily, even though we batch updates to the
+ * receive ring. This creates a less bursty demand on the memory allocator,
+ * so should reduce the chance of failed allocation requests both for
+ * ourselves and for other kernel subsystems.
+ */
+ batch_target = np->rx_target - (req_prod - np->rx_resp_cons);
+ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
+ if (unlikely((skb = alloc_xen_skb(dev->mtu + RX_HEADROOM)) == NULL))
+ break;
+ __skb_queue_tail(&np->rx_batch, skb);
+ }
+
+ /* Is the batch large enough to be worthwhile? */
+ if (i < (np->rx_target/2))
+ return;
+
+ for (i = 0; ; i++) {
+ if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
+ break;
+
+ skb->dev = dev;
+
+ id = GET_ID_FROM_FREELIST(np->rx_skbs);
+
+ np->rx_skbs[id] = skb;
+
+ np->rx->ring[MASK_NETIF_RX_IDX(req_prod + i)].req.id = id;
+
+ rx_pfn_array[i] = virt_to_machine(skb->head) >> PAGE_SHIFT;
+
+ /* Remove this page from pseudo phys map before passing back to Xen. */
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT]
+ = INVALID_P2M_ENTRY;
+
+ rx_mcl[i].op = __HYPERVISOR_update_va_mapping;
- mcl->args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
++ rx_mcl[i].args[0] = (unsigned long)skb->head;
+ rx_mcl[i].args[1] = 0;
+ rx_mcl[i].args[2] = 0;
+ }
+
+ /*
+ * We may have allocated buffers which have entries outstanding in the page
+ * update queue -- make sure we flush those first!
+ */
+ flush_page_update_queue();
+
+ /* After all PTEs have been zapped we blow away stale TLB entries. */
+ rx_mcl[i-1].args[2] = UVMF_FLUSH_TLB;
+
+ /* Give away a batch of pages. */
+ rx_mcl[i].op = __HYPERVISOR_dom_mem_op;
+ rx_mcl[i].args[0] = MEMOP_decrease_reservation;
+ rx_mcl[i].args[1] = (unsigned long)rx_pfn_array;
+ rx_mcl[i].args[2] = (unsigned long)i;
+ rx_mcl[i].args[3] = 0;
+ rx_mcl[i].args[4] = DOMID_SELF;
+
+ /* Tell the balloon driver what is going on. */
+ balloon_update_driver_allowance(i);
+
+ /* Zap PTEs and give away pages in one big multicall. */
+ (void)HYPERVISOR_multicall(rx_mcl, i+1);
+
+ /* Check return status of HYPERVISOR_dom_mem_op(). */
+ if (unlikely(rx_mcl[i].args[5] != i))
+ panic("Unable to reduce memory reservation\n");
+
+ /* Above is a suitable barrier to ensure backend will see requests. */
+ np->rx->req_prod = req_prod + i;
+
+ /* Adjust our floating fill target if we risked running out of buffers. */
+ if (((req_prod - np->rx->resp_prod) < (np->rx_target / 4)) &&
+ ((np->rx_target *= 2) > RX_MAX_TARGET))
+ np->rx_target = RX_MAX_TARGET;
+ }
+
+
+ static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ unsigned short id;
+ struct net_private *np = netdev_priv(dev);
+ netif_tx_request_t *tx;
+ NETIF_RING_IDX i;
+
+ if (unlikely(np->tx_full)) {
+ printk(KERN_ALERT "%s: full queue wasn't stopped!\n", dev->name);
+ netif_stop_queue(dev);
+ goto drop;
+ }
+
+ if (unlikely((((unsigned long)skb->data & ~PAGE_MASK) + skb->len) >=
+ PAGE_SIZE)) {
+ struct sk_buff *nskb;
+ if (unlikely((nskb = alloc_xen_skb(skb->len)) == NULL))
+ goto drop;
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, skb->len);
+ nskb->dev = skb->dev;
+ dev_kfree_skb(skb);
+ skb = nskb;
+ }
+
+ spin_lock_irq(&np->tx_lock);
+
+ if (np->backend_state != BEST_CONNECTED) {
+ spin_unlock_irq(&np->tx_lock);
+ goto drop;
+ }
+
+ i = np->tx->req_prod;
+
+ id = GET_ID_FROM_FREELIST(np->tx_skbs);
+ np->tx_skbs[id] = skb;
+
+ tx = &np->tx->ring[MASK_NETIF_TX_IDX(i)].req;
+
+ tx->id = id;
+ tx->addr = virt_to_machine(skb->data);
+ tx->size = skb->len;
+
+ wmb(); /* Ensure that backend will see the request. */
+ np->tx->req_prod = i + 1;
+
+ network_tx_buf_gc(dev);
+
+ if ((i - np->tx_resp_cons) == (NETIF_TX_RING_SIZE - 1)) {
+ np->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irq(&np->tx_lock);
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+
+ /* Only notify Xen if we really have to. */
+ mb();
+ if (np->tx->TX_TEST_IDX == i)
+ notify_via_evtchn(np->evtchn);
+
+ return 0;
+
+ drop:
+ np->stats.tx_dropped++;
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
+ {
+ struct net_device *dev = dev_id;
+ struct net_private *np = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&np->tx_lock, flags);
+ network_tx_buf_gc(dev);
+ spin_unlock_irqrestore(&np->tx_lock, flags);
+
+ if ((np->rx_resp_cons != np->rx->resp_prod) && (np->user_state == UST_OPEN))
+ netif_rx_schedule(dev);
+
+ return IRQ_HANDLED;
+ }
+
+
+ static int netif_poll(struct net_device *dev, int *pbudget)
+ {
+ struct net_private *np = netdev_priv(dev);
+ struct sk_buff *skb, *nskb;
+ netif_rx_response_t *rx;
+ NETIF_RING_IDX i, rp;
+ mmu_update_t *mmu = rx_mmu;
+ multicall_entry_t *mcl = rx_mcl;
+ int work_done, budget, more_to_do = 1;
+ struct sk_buff_head rxq;
+ unsigned long flags;
+
+ spin_lock(&np->rx_lock);
+
+ if (np->backend_state != BEST_CONNECTED) {
+ spin_unlock(&np->rx_lock);
+ return 0;
+ }
+
+ skb_queue_head_init(&rxq);
+
+ if ((budget = *pbudget) > dev->quota)
+ budget = dev->quota;
+
+ rp = np->rx->resp_prod;
+ rmb(); /* Ensure we see queued responses up to 'rp'. */
+
+ for (i = np->rx_resp_cons, work_done = 0;
+ (i != rp) && (work_done < budget);
+ i++, work_done++) {
+ rx = &np->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
+
+ /*
+ * An error here is very odd. Usually indicates a backend bug,
+ * low-memory condition, or that we didn't have reservation headroom.
+ */
+ if (unlikely(rx->status <= 0)) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "Bad rx buffer (memory squeeze?).\n");
+ np->rx->ring[MASK_NETIF_RX_IDX(np->rx->req_prod)].req.id = rx->id;
+ wmb();
+ np->rx->req_prod++;
+ work_done--;
+ continue;
+ }
+
+ skb = np->rx_skbs[rx->id];
+ ADD_ID_TO_FREELIST(np->rx_skbs, rx->id);
+
+ /* NB. We handle skb overflow later. */
+ skb->data = skb->head + (rx->addr & ~PAGE_MASK);
+ skb->len = rx->status;
+ skb->tail = skb->data + skb->len;
+
+ np->stats.rx_packets++;
+ np->stats.rx_bytes += rx->status;
+
+ /* Remap the page: record the new frame in the M2P table, re-point the
+ * kernel PTE for skb->head at it, and update our P2M entry. */
+ mmu->ptr = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
+ mmu->val = __pa(skb->head) >> PAGE_SHIFT;
+ mmu++;
+ mcl->op = __HYPERVISOR_update_va_mapping;
++ mcl->args[0] = (unsigned long)skb->head;
+ mcl->args[1] = (rx->addr & PAGE_MASK) | __PAGE_KERNEL;
+ mcl->args[2] = 0;
+ mcl++;
+
+ phys_to_machine_mapping[__pa(skb->head) >> PAGE_SHIFT] =
+ rx->addr >> PAGE_SHIFT;
+
+ __skb_queue_tail(&rxq, skb);
+ }
+
+ /* Some pages are no longer absent... */
+ balloon_update_driver_allowance(-work_done);
+
+ /* Do all the remapping work, and M->P updates, in one big hypercall. */
+ if (likely((mcl - rx_mcl) != 0)) {
+ mcl->op = __HYPERVISOR_mmu_update;
+ mcl->args[0] = (unsigned long)rx_mmu;
+ mcl->args[1] = mmu - rx_mmu;
+ mcl->args[2] = 0;
+ mcl++;
+ (void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
+ }
+
+ while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ /*
+ * Enough room in skbuff for the data we were passed? Also, Linux
+ * expects at least 16 bytes headroom in each receive buffer.
+ */
+ if (unlikely(skb->tail > skb->end) ||
+ unlikely((skb->data - skb->head) < 16)) {
+ nskb = NULL;
+
+ /* Only copy the packet if it fits in the current MTU. */
+ if (skb->len <= (dev->mtu + ETH_HLEN)) {
+ if ((skb->tail > skb->end) && net_ratelimit())
+ printk(KERN_INFO "Received packet needs %d bytes more "
+ "headroom.\n", skb->tail - skb->end);
+
+ if ((nskb = alloc_xen_skb(skb->len + 2)) != NULL) {
+ skb_reserve(nskb, 2);
+ skb_put(nskb, skb->len);
+ memcpy(nskb->data, skb->data, skb->len);
+ nskb->dev = skb->dev;
+ }
+ }
+ else if (net_ratelimit())
+ printk(KERN_INFO "Received packet too big for MTU "
+ "(%d > %d)\n", skb->len - ETH_HLEN, dev->mtu);
+
+ /* Reinitialise and then destroy the old skbuff. */
+ skb->len = 0;
+ skb->tail = skb->data;
+ init_skb_shinfo(skb);
+ dev_kfree_skb(skb);
+
+ /* Switch old for new, if we copied the buffer. */
+ if ((skb = nskb) == NULL)
+ continue;
+ }
+
+ /* Set the shared-info area, which is hidden behind the real data. */
+ init_skb_shinfo(skb);
+
+ /* Ethernet-specific work. Delayed to here as it peeks the header. */
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Pass it up. */
+ netif_receive_skb(skb);
+ dev->last_rx = jiffies;
+ }
+
+ np->rx_resp_cons = i;
+
+ /* If we get a callback with very few responses, reduce fill target. */
+ /* NB. Note exponential increase, linear decrease. */
+ if (((np->rx->req_prod - np->rx->resp_prod) > ((3*np->rx_target) / 4)) &&
+ (--np->rx_target < RX_MIN_TARGET))
+ np->rx_target = RX_MIN_TARGET;
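+ /* (The target is doubled in network_alloc_rx_buffers() when the ring runs
+ * low, and only decremented by one here while it stays over 3/4 full.) */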
+
+ network_alloc_rx_buffers(dev);
+
+ *pbudget -= work_done;
+ dev->quota -= work_done;
+
+ if (work_done < budget) {
+ local_irq_save(flags);
+
+ np->rx->event = i + 1;
+
+ /* Deal with hypervisor racing our resetting of rx_event. */
+ mb();
+ if (np->rx->resp_prod == i) {
+ __netif_rx_complete(dev);
+ more_to_do = 0;
+ }
+
+ local_irq_restore(flags);
+ }
+
+ spin_unlock(&np->rx_lock);
+
+ return more_to_do;
+ }
+
+
+ static int network_close(struct net_device *dev)
+ {
+ struct net_private *np = netdev_priv(dev);
+ np->user_state = UST_CLOSED;
+ netif_stop_queue(np->dev);
+ return 0;
+ }
+
+
+ static struct net_device_stats *network_get_stats(struct net_device *dev)
+ {
+ struct net_private *np = netdev_priv(dev);
+ return &np->stats;
+ }
+
+
+ static void network_connect(struct net_device *dev,
+ netif_fe_interface_status_t *status)
+ {
+ struct net_private *np;
+ int i, requeue_idx;
+ netif_tx_request_t *tx;
+
+ np = netdev_priv(dev);
+ spin_lock_irq(&np->tx_lock);
+ spin_lock(&np->rx_lock);
+
+ /* Recovery procedure: */
+
+ /* Step 1: Reinitialise variables. */
+ np->rx_resp_cons = np->tx_resp_cons = np->tx_full = 0;
+ np->rx->event = np->tx->event = 1;
+
+ /* Step 2: Rebuild the RX and TX ring contents.
+ * NB. We could just free the queued TX packets now but we hope
+ * that sending them out might do some good. We have to rebuild
+ * the RX ring because some of our pages are currently flipped out
+ * so we can't just free the RX skbs.
+ * NB2. Freelist index entries are always going to be less than
+ * __PAGE_OFFSET, whereas pointers to skbs will always be equal to or
+ * greater than __PAGE_OFFSET: we use this property to distinguish
+ * them.
+ */
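+ /* (For example, freelist entries are stored as (void *)(i+1) -- see
+ * create_netdev() below -- i.e. small integers well below __PAGE_OFFSET
+ * (0xC0000000), whereas a real skb pointer is a kernel virtual address at
+ * or above __PAGE_OFFSET.) */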
+
+ /* Rebuild the TX buffer freelist and the TX ring itself.
+ * NB. This reorders packets. We could keep more private state
+ * to avoid this but maybe it doesn't matter so much given the
+ * interface has been down.
+ */
+ for (requeue_idx = 0, i = 1; i <= NETIF_TX_RING_SIZE; i++) {
+ if ((unsigned long)np->tx_skbs[i] >= __PAGE_OFFSET) {
+ struct sk_buff *skb = np->tx_skbs[i];
+
+ tx = &np->tx->ring[requeue_idx++].req;
+
+ tx->id = i;
+ tx->addr = virt_to_machine(skb->data);
+ tx->size = skb->len;
+
+ np->stats.tx_bytes += skb->len;
+ np->stats.tx_packets++;
+ }
+ }
+ wmb();
+ np->tx->req_prod = requeue_idx;
+
+ /* Rebuild the RX buffer freelist and the RX ring itself. */
+ for (requeue_idx = 0, i = 1; i <= NETIF_RX_RING_SIZE; i++)
+ if ((unsigned long)np->rx_skbs[i] >= __PAGE_OFFSET)
+ np->rx->ring[requeue_idx++].req.id = i;
+ wmb();
+ np->rx->req_prod = requeue_idx;
+
+ /* Step 3: All public and private state should now be sane. Get
+ * ready to start sending and receiving packets and give the driver
+ * domain a kick because we've probably just requeued some
+ * packets.
+ */
+ np->backend_state = BEST_CONNECTED;
+ wmb();
+ notify_via_evtchn(status->evtchn);
+ network_tx_buf_gc(dev);
+
+ if (np->user_state == UST_OPEN)
+ netif_start_queue(dev);
+
+ spin_unlock(&np->rx_lock);
+ spin_unlock_irq(&np->tx_lock);
+ }
+
+ static void vif_show(struct net_private *np)
+ {
+ #if DEBUG
+ if (np) {
+ IPRINTK("<vif handle=%u %s(%s) evtchn=%u irq=%u tx=%p rx=%p>\n",
+ np->handle,
+ be_state_name[np->backend_state],
+ np->user_state ? "open" : "closed",
+ np->evtchn,
+ np->irq,
+ np->tx,
+ np->rx);
+ } else {
+ IPRINTK("<vif NULL>\n");
+ }
+ #endif
+ }
+
+ /* Send a connect message to xend to tell it to bring up the interface. */
+ static void send_interface_connect(struct net_private *np)
+ {
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_INTERFACE_CONNECT,
+ .length = sizeof(netif_fe_interface_connect_t),
+ };
+ netif_fe_interface_connect_t *msg = (void*)cmsg.msg;
+
+ msg->handle = np->handle;
+ msg->tx_shmem_frame = (virt_to_machine(np->tx) >> PAGE_SHIFT);
+ msg->rx_shmem_frame = (virt_to_machine(np->rx) >> PAGE_SHIFT);
+
+ ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ }
+
+ /* Send a driver status notification to the domain controller. */
+ static int send_driver_status(int ok)
+ {
+ int err = 0;
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_DRIVER_STATUS,
+ .length = sizeof(netif_fe_driver_status_t),
+ };
+ netif_fe_driver_status_t *msg = (void*)cmsg.msg;
+
+ msg->status = (ok ? NETIF_DRIVER_STATUS_UP : NETIF_DRIVER_STATUS_DOWN);
+ err = ctrl_if_send_message_block(&cmsg, NULL, 0, TASK_UNINTERRUPTIBLE);
+ return err;
+ }
+
+ /* Stop network device and free tx/rx queues and irq.
+ */
+ static void vif_release(struct net_private *np)
+ {
+ /* Stop old i/f to prevent errors whilst we rebuild the state. */
+ spin_lock_irq(&np->tx_lock);
+ spin_lock(&np->rx_lock);
+ netif_stop_queue(np->dev);
+ /* np->backend_state = BEST_DISCONNECTED; */
+ spin_unlock(&np->rx_lock);
+ spin_unlock_irq(&np->tx_lock);
+
+ /* Free resources. */
+ if(np->tx != NULL){
+ free_irq(np->irq, np->dev);
+ unbind_evtchn_from_irq(np->evtchn);
+ free_page((unsigned long)np->tx);
+ free_page((unsigned long)np->rx);
+ np->irq = 0;
+ np->evtchn = 0;
+ np->tx = NULL;
+ np->rx = NULL;
+ }
+ }
+
+ /* Release vif resources and close it down completely.
+ */
+ static void vif_close(struct net_private *np)
+ {
+ WPRINTK("Unexpected netif-CLOSED message in state %s\n",
+ be_state_name[np->backend_state]);
+ vif_release(np);
+ np->backend_state = BEST_CLOSED;
+ /* todo: take dev down and free. */
+ vif_show(np);
+ }
+
+ /* Move the vif into disconnected state.
+ * Allocates tx/rx pages.
+ * Sends connect message to xend.
+ */
+ static void vif_disconnect(struct net_private *np)
+ {
+ if(np->tx) free_page((unsigned long)np->tx);
+ if(np->rx) free_page((unsigned long)np->rx);
+ // Before this np->tx and np->rx had better be null.
+ np->tx = (netif_tx_interface_t *)__get_free_page(GFP_KERNEL);
+ np->rx = (netif_rx_interface_t *)__get_free_page(GFP_KERNEL);
+ memset(np->tx, 0, PAGE_SIZE);
+ memset(np->rx, 0, PAGE_SIZE);
+ np->backend_state = BEST_DISCONNECTED;
+ send_interface_connect(np);
+ vif_show(np);
+ }
+
+ /* Begin interface recovery.
+ *
+ * NB. Whilst we're recovering, we turn the carrier state off. We
+ * take measures to ensure that this device isn't used for
+ * anything. We also stop the queue for this device. Various
+ * different approaches (e.g. continuing to buffer packets) have
+ * been tested but don't appear to improve the overall impact on
+ * TCP connections.
+ *
+ * TODO: (MAW) Change the Xend<->Guest protocol so that a recovery
+ * is initiated by a special "RESET" message - disconnect could
+ * just mean we're not allowed to use this interface any more.
+ */
+ static void vif_reset(struct net_private *np)
+ {
+ IPRINTK("Attempting to reconnect network interface: handle=%u\n",
+ np->handle);
+ vif_release(np);
+ vif_disconnect(np);
+ vif_show(np);
+ }
+
+ /* Move the vif into connected state.
+ * Sets the mac and event channel from the message.
+ * Binds the irq to the event channel.
+ */
+ static void
+ vif_connect(struct net_private *np, netif_fe_interface_status_t *status)
+ {
+ struct net_device *dev = np->dev;
+ memcpy(dev->dev_addr, status->mac, ETH_ALEN);
+ network_connect(dev, status);
+ np->evtchn = status->evtchn;
+ np->irq = bind_evtchn_to_irq(np->evtchn);
+ (void)request_irq(np->irq, netif_int, SA_SAMPLE_RANDOM, dev->name, dev);
+ netctrl_connected_count();
+ (void)send_fake_arp(dev);
+ vif_show(np);
+ }
+
+
+ /** Create a network device.
+ * @param handle device handle
+ * @param val return parameter for created device
+ * @return 0 on success, error code otherwise
+ */
+ static int create_netdev(int handle, struct net_device **val)
+ {
+ int i, err = 0;
+ struct net_device *dev = NULL;
+ struct net_private *np = NULL;
+
+ if ((dev = alloc_etherdev(sizeof(struct net_private))) == NULL) {
+ printk(KERN_WARNING "%s> alloc_etherdev failed.\n", __FUNCTION__);
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ np = netdev_priv(dev);
+ np->backend_state = BEST_CLOSED;
+ np->user_state = UST_CLOSED;
+ np->handle = handle;
+
+ spin_lock_init(&np->tx_lock);
+ spin_lock_init(&np->rx_lock);
+
+ skb_queue_head_init(&np->rx_batch);
+ np->rx_target = RX_MIN_TARGET;
+
+ /* Initialise {tx,rx}_skbs to be a free chain containing every entry. */
+ for (i = 0; i <= NETIF_TX_RING_SIZE; i++)
+ np->tx_skbs[i] = (void *)(i+1);
+ for (i = 0; i <= NETIF_RX_RING_SIZE; i++)
+ np->rx_skbs[i] = (void *)(i+1);
+
+ dev->open = network_open;
+ dev->hard_start_xmit = network_start_xmit;
+ dev->stop = network_close;
+ dev->get_stats = network_get_stats;
+ dev->poll = netif_poll;
+ dev->weight = 64;
+
+ if ((err = register_netdev(dev)) != 0) {
+ printk(KERN_WARNING "%s> register_netdev err=%d\n", __FUNCTION__, err);
+ goto exit;
+ }
+ np->dev = dev;
+ list_add(&np->list, &dev_list);
+
+ exit:
+ if ((err != 0) && (dev != NULL ))
+ kfree(dev);
+ else if (val != NULL)
+ *val = dev;
+ return err;
+ }
+
+ /* Get the target interface for a status message.
+ * Creates the interface when it makes sense.
+ * The returned interface may be null when there is no error.
+ *
+ * @param status status message
+ * @param np return parameter for interface state
+ * @return 0 on success, error code otherwise
+ */
+ static int
+ target_vif(netif_fe_interface_status_t *status, struct net_private **np)
+ {
+ int err = 0;
+ struct net_device *dev;
+
+ DPRINTK("> handle=%d\n", status->handle);
+ if (status->handle < 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if ((dev = find_dev_by_handle(status->handle)) != NULL)
+ goto exit;
+
+ if (status->status == NETIF_INTERFACE_STATUS_CLOSED)
+ goto exit;
+ if (status->status == NETIF_INTERFACE_STATUS_CHANGED)
+ goto exit;
+
+ /* It's a new interface in a good state - create it. */
+ DPRINTK("> create device...\n");
+ if ((err = create_netdev(status->handle, &dev)) != 0)
+ goto exit;
+
+ netctrl.interface_n++;
+
+ exit:
+ if (np != NULL)
+ *np = ((dev && !err) ? netdev_priv(dev) : NULL);
+ DPRINTK("< err=%d\n", err);
+ return err;
+ }
+
+ /* Handle an interface status message. */
+ static void netif_interface_status(netif_fe_interface_status_t *status)
+ {
+ int err = 0;
+ struct net_private *np = NULL;
+
+ DPRINTK("> status=%s handle=%d\n",
+ status_name[status->status], status->handle);
+
+ if ((err = target_vif(status, &np)) != 0) {
+ WPRINTK("Invalid netif: handle=%u\n", status->handle);
+ return;
+ }
+
+ if (np == NULL) {
+ DPRINTK("> no vif\n");
+ return;
+ }
+
+ switch (status->status) {
+ case NETIF_INTERFACE_STATUS_CLOSED:
+ switch (np->backend_state) {
+ case BEST_CLOSED:
+ case BEST_DISCONNECTED:
+ case BEST_CONNECTED:
+ vif_close(np);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_DISCONNECTED:
+ switch (np->backend_state) {
+ case BEST_CLOSED:
+ vif_disconnect(np);
+ break;
+ case BEST_DISCONNECTED:
+ case BEST_CONNECTED:
+ vif_reset(np);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_CONNECTED:
+ switch (np->backend_state) {
+ case BEST_CLOSED:
+ WPRINTK("Unexpected netif status %s in state %s\n",
+ status_name[status->status],
+ be_state_name[np->backend_state]);
+ vif_disconnect(np);
+ vif_connect(np, status);
+ break;
+ case BEST_DISCONNECTED:
+ vif_connect(np, status);
+ break;
+ }
+ break;
+
+ case NETIF_INTERFACE_STATUS_CHANGED:
+ /*
+ * The domain controller is notifying us that a device has been
+ * added or removed.
+ */
+ break;
+
+ default:
+ WPRINTK("Invalid netif status code %d\n", status->status);
+ break;
+ }
+
+ vif_show(np);
+ }
+
+ /*
+ * Handle a driver status notification from the domain controller.
+ */
+ static void netif_driver_status(netif_fe_driver_status_t *status)
+ {
+ netctrl.up = status->status;
+ netctrl_connected_count();
+ }
+
+ /* Receive handler for control messages. */
+ static void netif_ctrlif_rx(ctrl_msg_t *msg, unsigned long id)
+ {
+
+ switch (msg->subtype) {
+ case CMSG_NETIF_FE_INTERFACE_STATUS:
+ if (msg->length != sizeof(netif_fe_interface_status_t))
+ goto error;
+ netif_interface_status((netif_fe_interface_status_t *) &msg->msg[0]);
+ break;
+
+ case CMSG_NETIF_FE_DRIVER_STATUS:
+ if (msg->length != sizeof(netif_fe_driver_status_t))
+ goto error;
+ netif_driver_status((netif_fe_driver_status_t *) &msg->msg[0]);
+ break;
+
+ error:
+ default:
+ msg->length = 0;
+ break;
+ }
+
+ ctrl_if_send_response(msg);
+ }
+
+
+ #if 1
+ /* Wait for all interfaces to be connected.
+ *
+ * This works OK, but we'd like to use the probing mode (see below).
+ */
+ static int probe_interfaces(void)
+ {
+ int err = 0, conn = 0;
+ int wait_i, wait_n = 100;
+
+ DPRINTK(">\n");
+
+ for (wait_i = 0; wait_i < wait_n; wait_i++) {
+ DPRINTK("> wait_i=%d\n", wait_i);
+ conn = netctrl_connected();
+ if(conn) break;
+ DPRINTK("> schedule_timeout...\n");
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(10);
+ }
+
+ DPRINTK("> wait finished...\n");
+ if (conn <= 0) {
+ err = netctrl_err(-ENETDOWN);
+ WPRINTK("Failed to connect all virtual interfaces: err=%d\n", err);
+ }
+
+ DPRINTK("< err=%d\n", err);
+
+ return err;
+ }
+ #else
+ /* Probe for interfaces until no more are found.
+ *
+ * This is the mode we'd like to use, but at the moment it panics the kernel.
+ */
+ static int probe_interfaces(void)
+ {
+ int err = 0;
+ int wait_i, wait_n = 100;
+ ctrl_msg_t cmsg = {
+ .type = CMSG_NETIF_FE,
+ .subtype = CMSG_NETIF_FE_INTERFACE_STATUS,
+ .length = sizeof(netif_fe_interface_status_t),
+ };
+ netif_fe_interface_status_t msg = {};
+ ctrl_msg_t rmsg = {};
+ netif_fe_interface_status_t *reply = (void*)rmsg.msg;
+ int state = TASK_UNINTERRUPTIBLE;
+ u32 query = -1;
+
+ DPRINTK(">\n");
+
+ netctrl.interface_n = 0;
+ for (wait_i = 0; wait_i < wait_n; wait_i++) {
+ DPRINTK("> wait_i=%d query=%d\n", wait_i, query);
+ msg.handle = query;
+ memcpy(cmsg.msg, &msg, sizeof(msg));
+ DPRINTK("> set_current_state...\n");
+ set_current_state(state);
+ DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+ DPRINTK("> sending...\n");
+ err = ctrl_if_send_message_and_get_response(&cmsg, &rmsg, state);
+ DPRINTK("> err=%d\n", err);
+ if(err) goto exit;
+ DPRINTK("> rmsg=%p msg=%p, reply=%p\n", &rmsg, rmsg.msg, reply);
+ if((int)reply->handle < 0) {
+ // No more interfaces.
+ break;
+ }
+ query = -reply->handle - 2;
+ DPRINTK(">netif_interface_status ...\n");
+ netif_interface_status(reply);
+ }
+
+ exit:
+ if (err) {
+ err = netctrl_err(-ENETDOWN);
+ WPRINTK("Connecting virtual network interfaces failed: err=%d\n", err);
+ }
+
+ DPRINTK("< err=%d\n", err);
+ return err;
+ }
+
+ #endif
+
+ /*
+ * We use this notifier to send out a fake ARP reply to reset switches and
+ * router ARP caches when an IP interface is brought up on a VIF.
+ */
+ static int
+ inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
+ {
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct list_head *ent;
+ struct net_private *np;
+
+ if (event != NETDEV_UP)
+ goto out;
+
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ if (np->dev == dev)
+ (void)send_fake_arp(dev);
+ }
+
+ out:
+ return NOTIFY_DONE;
+ }
+
+ static struct notifier_block notifier_inetdev = {
+ .notifier_call = inetdev_notify,
+ .next = NULL,
+ .priority = 0
+ };
+
+ static int __init netif_init(void)
+ {
+ int err = 0;
+
+ if (xen_start_info.flags & SIF_INITDOMAIN)
+ return 0;
+
+ IPRINTK("Initialising virtual ethernet driver.\n");
+ INIT_LIST_HEAD(&dev_list);
+ (void)register_inetaddr_notifier(¬ifier_inetdev);
+ netctrl_init();
+ (void)ctrl_if_register_receiver(CMSG_NETIF_FE, netif_ctrlif_rx,
+ CALLBACK_IN_BLOCKING_CONTEXT);
+ send_driver_status(1);
+ err = probe_interfaces();
+ if (err)
+ ctrl_if_unregister_receiver(CMSG_NETIF_FE, netif_ctrlif_rx);
+
+ DPRINTK("< err=%d\n", err);
+ return err;
+ }
+
+ static void vif_suspend(struct net_private *np)
+ {
+ /* Avoid having tx/rx stuff happen until we're ready. */
+ free_irq(np->irq, np->dev);
+ unbind_evtchn_from_irq(np->evtchn);
+ }
+
+ static void vif_resume(struct net_private *np)
+ {
+ /*
+ * Connect regardless of whether IFF_UP flag set.
+ * Stop bad things from happening until we're back up.
+ */
+ np->backend_state = BEST_DISCONNECTED;
+ memset(np->tx, 0, PAGE_SIZE);
+ memset(np->rx, 0, PAGE_SIZE);
+
+ send_interface_connect(np);
+ }
+
+ void netif_suspend(void)
+ {
+ struct list_head *ent;
+ struct net_private *np;
+
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ vif_suspend(np);
+ }
+ }
+
+ void netif_resume(void)
+ {
+ struct list_head *ent;
+ struct net_private *np;
+
+ list_for_each (ent, &dev_list) {
+ np = list_entry(ent, struct net_private, list);
+ vif_resume(np);
+ }
+ }
+
+
+ module_init(netif_init);
+
--- /dev/null
- unsigned long m2p_start_mfn =
- HYPERVISOR_shared_info->arch.mfn_to_pfn_start;
-
- if( put_user( m2p_start_mfn, (unsigned long *) data ) )
- ret = -EFAULT;
- else
- ret = 0;
+ /******************************************************************************
+ * privcmd.c
+ *
+ * Interface to privileged domain-0 commands.
+ *
+ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
+ */
+
+ #include <linux/config.h>
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/errno.h>
+ #include <linux/mm.h>
+ #include <linux/mman.h>
+ #include <linux/swap.h>
+ #include <linux/smp_lock.h>
+ #include <linux/highmem.h>
+ #include <linux/pagemap.h>
+ #include <linux/seq_file.h>
+
+ #include <asm/pgalloc.h>
+ #include <asm/pgtable.h>
+ #include <asm/uaccess.h>
+ #include <asm/tlb.h>
+ #include <asm-xen/linux-public/privcmd.h>
+ #include <asm-xen/xen-public/dom0_ops.h>
+ #include <asm-xen/xen_proc.h>
+
+ static struct proc_dir_entry *privcmd_intf;
+
+ static int privcmd_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long data)
+ {
+ int ret = -ENOSYS;
+
+ switch ( cmd )
+ {
+ case IOCTL_PRIVCMD_HYPERCALL:
+ {
+ privcmd_hypercall_t hypercall;
+
+ if ( copy_from_user(&hypercall, (void *)data, sizeof(hypercall)) )
+ return -EFAULT;
+
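+ /* Load the hypercall number from hypercall.op into %eax and its five
+ * arguments into %ebx-%edi, then trap into Xen via TRAP_INSTR; the
+ * hypercall's return value comes back in %eax (ret). */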
+ __asm__ __volatile__ (
+ "pushl %%ebx; pushl %%ecx; pushl %%edx; pushl %%esi; pushl %%edi; "
+ "movl 4(%%eax),%%ebx ;"
+ "movl 8(%%eax),%%ecx ;"
+ "movl 12(%%eax),%%edx ;"
+ "movl 16(%%eax),%%esi ;"
+ "movl 20(%%eax),%%edi ;"
+ "movl (%%eax),%%eax ;"
+ TRAP_INSTR "; "
+ "popl %%edi; popl %%esi; popl %%edx; popl %%ecx; popl %%ebx"
+ : "=a" (ret) : "0" (&hypercall) : "memory" );
+
+ }
+ break;
+
+ case IOCTL_PRIVCMD_INITDOMAIN_EVTCHN:
+ {
+ extern int initdom_ctrlif_domcontroller_port;
+ ret = initdom_ctrlif_domcontroller_port;
+ }
+ break;
+
+ #if defined(CONFIG_XEN_PRIVILEGED_GUEST)
+ case IOCTL_PRIVCMD_MMAP:
+ {
+ #define PRIVCMD_MMAP_SZ 32
+ privcmd_mmap_t mmapcmd;
+ privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
+ int i, rc;
+
+ if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
+ return -EFAULT;
+
+ p = mmapcmd.entry;
+
+ for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
+ {
+ int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
+ PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
+ if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
+ return -EFAULT;
+
+ for ( j = 0; j < n; j++ )
+ {
+ struct vm_area_struct *vma =
+ find_vma( current->mm, msg[j].va );
+
+ if ( !vma )
+ return -EINVAL;
+
+ if ( msg[j].va > PAGE_OFFSET )
+ return -EINVAL;
+
+ if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
+ return -EINVAL;
+
+ if ( (rc = direct_remap_area_pages(vma->vm_mm,
+ msg[j].va&PAGE_MASK,
+ msg[j].mfn<<PAGE_SHIFT,
+ msg[j].npages<<PAGE_SHIFT,
+ vma->vm_page_prot,
+ mmapcmd.dom)) < 0 )
+ return rc;
+ }
+ }
+ ret = 0;
+ }
+ break;
+
+ case IOCTL_PRIVCMD_MMAPBATCH:
+ {
+ #define MAX_DIRECTMAP_MMU_QUEUE 130
+ mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
+ privcmd_mmapbatch_t m;
+ struct vm_area_struct *vma = NULL;
+ unsigned long *p, addr;
+ unsigned long mfn;
+ int i;
+
+ if ( copy_from_user(&m, (void *)data, sizeof(m)) )
+ { ret = -EFAULT; goto batch_err; }
+
+ vma = find_vma( current->mm, m.addr );
+
+ if ( !vma )
+ { ret = -EINVAL; goto batch_err; }
+
+ if ( m.addr > PAGE_OFFSET )
+ { ret = -EFAULT; goto batch_err; }
+
+ if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
+ { ret = -EFAULT; goto batch_err; }
+
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)m.dom << 16;
+ v = w = &u[1];
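+ /* u[0] selects the foreign domain for the whole batch; each iteration of
+ * the loop below then issues a two-entry mmu_update: u[0] plus the single
+ * page entry built at u[1]. */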
+
+ p = m.arr;
+ addr = m.addr;
+ for ( i = 0; i < m.num; i++, addr += PAGE_SIZE, p++ )
+ {
+ if ( get_user(mfn, p) )
+ return -EFAULT;
+
+ v->val = (mfn << PAGE_SHIFT) | pgprot_val(vma->vm_page_prot);
+
+ __direct_remap_area_pages(vma->vm_mm,
+ addr,
+ PAGE_SIZE,
+ v);
+
+ if ( unlikely(HYPERVISOR_mmu_update(u, v - u + 1, NULL) < 0) )
+ put_user( 0xF0000000 | mfn, p );
+
+ v = w;
+ }
+ ret = 0;
+ break;
+
+ batch_err:
+ printk("batch_err ret=%d vma=%p addr=%lx num=%d arr=%p %lx-%lx\n",
+ ret, vma, m.addr, m.num, m.arr, vma->vm_start, vma->vm_end);
+ break;
+ }
+ break;
+ #endif
+
+ case IOCTL_PRIVCMD_GET_MACH2PHYS_START_MFN:
+ {
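++ /* Read the raw (machine) pmd entry covering the m2p table's virtual
++ * address; since Xen maps the table with superpage mappings this gives
++ * the machine frame number of its first page. */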
++ unsigned long m2pv = (unsigned long)machine_to_phys_mapping;
++ pgd_t *pgd = pgd_offset_k(m2pv);
++ pmd_t *pmd = pmd_offset(pgd, m2pv);
++ unsigned long m2p_start_mfn = pmd_val_ma(*pmd) >> PAGE_SHIFT;
++ ret = put_user(m2p_start_mfn, (unsigned long *)data) ? -EFAULT: 0;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+ }
+
+ static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
+ {
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+
+ return 0;
+ }
+
+ static struct file_operations privcmd_file_ops = {
+ ioctl : privcmd_ioctl,
+ mmap: privcmd_mmap
+ };
+
+
+ static int __init privcmd_init(void)
+ {
+ if ( !(xen_start_info.flags & SIF_PRIVILEGED) )
+ return 0;
+
+ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
+ if ( privcmd_intf != NULL )
+ privcmd_intf->proc_fops = &privcmd_file_ops;
+
+ return 0;
+ }
+
+ __initcall(privcmd_init);
--- /dev/null
-#define TIMER_IRQ timer_irq
-
+ /*
+ * This file should contain #defines for all of the interrupt vector
+ * numbers used by this architecture.
+ *
+ * In addition, there are some standard defines:
+ *
+ * FIRST_EXTERNAL_VECTOR:
+ * The first free place for external interrupts
+ *
+ * SYSCALL_VECTOR:
+ * The IRQ vector a syscall makes the user to kernel transition
+ * under.
+ *
+ * TIMER_IRQ:
+ * The IRQ number the timer interrupt comes in at.
+ *
+ * NR_IRQS:
+ * The total number of interrupt vectors (including all the
+ * architecture specific interrupts) needed.
+ *
+ */
+ #ifndef _ASM_IRQ_VECTORS_H
+ #define _ASM_IRQ_VECTORS_H
+
+ /*
+ * IDT vectors usable for external interrupt sources start
+ * at 0x20:
+ */
+ #define FIRST_EXTERNAL_VECTOR 0x20
+
+ #define SYSCALL_VECTOR 0x80
+
+ /*
+ * Vectors 0x20-0x2f are used for ISA interrupts.
+ */
+
++#if 0
+ /*
+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
+ *
+ * some of the following vectors are 'rare', they are merged
+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
+ * TLB, reschedule and local APIC vectors are performance-critical.
+ *
+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
+ */
+ #define SPURIOUS_APIC_VECTOR 0xff
+ #define ERROR_APIC_VECTOR 0xfe
+ #define INVALIDATE_TLB_VECTOR 0xfd
+ #define RESCHEDULE_VECTOR 0xfc
+ #define CALL_FUNCTION_VECTOR 0xfb
+
+ #define THERMAL_APIC_VECTOR 0xf0
+ /*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+ */
+ #define LOCAL_TIMER_VECTOR 0xef
++#endif
+
+ /*
+ * First APIC vector available to drivers: (vectors 0x30-0xee)
+ * we start at 0x31 to spread out vectors evenly between priority
+ * levels. (0x80 is the syscall vector)
+ */
+ #define FIRST_DEVICE_VECTOR 0x31
+ #define FIRST_SYSTEM_VECTOR 0xef
+
-#if 0
+ /*
+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
+ * Right now the APIC is mostly only used for SMP.
+ * 256 vectors is an architectural limit. (we can have
+ * more than 256 devices theoretically, but they will
+ * have to use shared interrupts)
+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
+ * the usable vector space is 0x20-0xff (224 vectors)
+ */
+
-#endif
++#define NR_IPIS 8
++
++#define RESCHEDULE_VECTOR 1
++#define INVALIDATE_TLB_VECTOR 2
++#define CALL_FUNCTION_VECTOR 3
++
+ /*
+ * The maximum number of vectors supported by i386 processors
+ * is limited to 256. For processors other than i386, NR_VECTORS
+ * should be changed accordingly.
+ */
+ #define NR_VECTORS 256
-#define NR_PIRQS 128
+
+ #define FPU_IRQ 13
+
+ #define FIRST_VM86_IRQ 3
+ #define LAST_VM86_IRQ 15
+ #define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
+
+ /*
+ * The flat IRQ space is divided into two regions:
+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
+ * if we have physical device-access privilege. This region is at the
+ * start of the IRQ space so that existing device drivers do not need
+ * to be modified to translate physical IRQ numbers into our IRQ space.
+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
+ * are bound using the provided bind/unbind functions.
+ */
+
+ #define PIRQ_BASE 0
-#define NR_DYNIRQS 128
++#define NR_PIRQS 256
+
+ #define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS 256
+
+ #define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
+ #define NR_IRQ_VECTORS NR_IRQS
+
+ #define pirq_to_irq(_x) ((_x) + PIRQ_BASE)
+ #define irq_to_pirq(_x) ((_x) - PIRQ_BASE)
+
+ #define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
+ #define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
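+
+ /* Worked example with the values above: PIRQ_BASE is 0 and NR_PIRQS is 256,
+ * so pirq_to_irq(5) == 5; DYNIRQ_BASE is 256, so dynirq_to_irq(5) == 261
+ * and irq_to_dynirq(261) == 5. NR_IRQS is therefore 512. */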
+
+ #ifndef __ASSEMBLY__
+ /* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
+ extern int bind_virq_to_irq(int virq);
+ extern void unbind_virq_from_irq(int virq);
++extern int bind_ipi_on_cpu_to_irq(int cpu, int ipi);
++extern void unbind_ipi_on_cpu_from_irq(int cpu, int ipi);
+ extern int bind_evtchn_to_irq(int evtchn);
+ extern void unbind_evtchn_from_irq(int evtchn);
+
+ extern void irq_suspend(void);
+ extern void irq_resume(void);
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _ASM_IRQ_VECTORS_H */
--- /dev/null
-extern unsigned long *phys_to_machine_mapping;
-#define pfn_to_mfn(_pfn) (phys_to_machine_mapping[(_pfn)])
-#define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
+ #ifndef _I386_PAGE_H
+ #define _I386_PAGE_H
+
+ /* PAGE_SHIFT determines the page size */
+ #define PAGE_SHIFT 12
+ #define PAGE_SIZE (1UL << PAGE_SHIFT)
+ #define PAGE_MASK (~(PAGE_SIZE-1))
+
+ #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
+ #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
+
+ #ifdef __KERNEL__
+ #ifndef __ASSEMBLY__
+
+ #include <linux/config.h>
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <asm-xen/xen-public/xen.h>
+ #include <asm-xen/foreign_page.h>
+
+ #define arch_free_page(_page,_order) \
+ ({ int foreign = PageForeign(_page); \
+ if (foreign) \
+ (PageForeignDestructor(_page))(_page); \
+ foreign; \
+ })
+ #define HAVE_ARCH_FREE_PAGE
+
+ #ifdef CONFIG_XEN_SCRUB_PAGES
+ #define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+ #else
+ #define scrub_pages(_p,_n) ((void)0)
+ #endif
+
+ #ifdef CONFIG_X86_USE_3DNOW
+
+ #include <asm/mmx.h>
+
+ #define clear_page(page) mmx_clear_page((void *)(page))
+ #define copy_page(to,from) mmx_copy_page(to,from)
+
+ #else
+
+ #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+ #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+
+ /*
+ * On older X86 processors it's not a win to use MMX here it seems.
+ * Maybe the K6-III ?
+ */
+
+ #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
+ #define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
+
+ #endif
+
+ #define clear_user_page(page, vaddr, pg) clear_page(page)
+ #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+ /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++extern unsigned int *phys_to_machine_mapping;
++#define pfn_to_mfn(_pfn) ((unsigned long)(phys_to_machine_mapping[(_pfn)]))
++#define mfn_to_pfn(_mfn) ((unsigned long)(machine_to_phys_mapping[(_mfn)]))
+ static inline unsigned long phys_to_machine(unsigned long phys)
+ {
+ unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+ return machine;
+ }
+ static inline unsigned long machine_to_phys(unsigned long machine)
+ {
+ unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+ return phys;
+ }
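+ /* Example: with PAGE_SHIFT == 12, phys_to_machine(0x12345) looks up
+ * pfn_to_mfn(0x12) and reattaches the in-page offset 0x345;
+ * machine_to_phys() is the inverse, via machine_to_phys_mapping. */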
+
+ /*
+ * These are used to make use of C type-checking..
+ */
+ extern int nx_enabled;
+ #ifdef CONFIG_X86_PAE
+ extern unsigned long long __supported_pte_mask;
+ typedef struct { unsigned long pte_low, pte_high; } pte_t;
+ typedef struct { unsigned long long pmd; } pmd_t;
+ typedef struct { unsigned long long pgd; } pgd_t;
+ typedef struct { unsigned long long pgprot; } pgprot_t;
+ #define pmd_val(x) ((x).pmd)
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+ #define __pmd(x) ((pmd_t) { (x) } )
+ #define HPAGE_SHIFT 21
+ #else
+ typedef struct { unsigned long pte_low; } pte_t;
+ typedef struct { unsigned long pgd; } pgd_t;
+ typedef struct { unsigned long pgprot; } pgprot_t;
+ #define boot_pte_t pte_t /* or would you rather have a typedef */
+ #define pte_val(x) (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
+ (x).pte_low)
+ #define pte_val_ma(x) ((x).pte_low)
+ #define HPAGE_SHIFT 22
+ #endif
+ #define PTE_MASK PAGE_MASK
+
+ #ifdef CONFIG_HUGETLB_PAGE
+ #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
+ #define HPAGE_MASK (~(HPAGE_SIZE - 1))
+ #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+ #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+ #endif
+
+
+ static inline unsigned long pgd_val(pgd_t x)
+ {
+ unsigned long ret = x.pgd;
+ if (ret) ret = machine_to_phys(ret);
+ return ret;
+ }
++#define pgd_val_ma(x) ((x).pgd)
+ #define pgprot_val(x) ((x).pgprot)
+
+ static inline pte_t __pte(unsigned long x)
+ {
+ if (x & 1) x = phys_to_machine(x);
+ return ((pte_t) { (x) });
+ }
+ #define __pte_ma(x) ((pte_t) { (x) } )
+ static inline pgd_t __pgd(unsigned long x)
+ {
+ if ((x & 1)) x = phys_to_machine(x);
+ return ((pgd_t) { (x) });
+ }
+ #define __pgprot(x) ((pgprot_t) { (x) } )
+
+ #endif /* !__ASSEMBLY__ */
+
+ /* to align the pointer to the (next) page boundary */
+ #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+ /*
+ * This handles the memory map.. We could make this a config
+ * option, but too many people screw it up, and too few need
+ * it.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+
+ #ifndef __ASSEMBLY__
+
+ /*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+ extern unsigned int __VMALLOC_RESERVE;
+
+ /* Pure 2^n version of get_order */
+ static __inline__ int get_order(unsigned long size)
+ {
+ int order;
+
+ size = (size-1) >> (PAGE_SHIFT-1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+ }
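+ /* e.g. get_order(PAGE_SIZE) == 0, get_order(PAGE_SIZE + 1) == 1 and
+ * get_order(4 * PAGE_SIZE) == 2. */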
+
+ extern int sysctl_legacy_va_layout;
+
+ #endif /* __ASSEMBLY__ */
+
+ #ifdef __ASSEMBLY__
+ #define __PAGE_OFFSET (0xC0000000)
+ #else
+ #define __PAGE_OFFSET (0xC0000000UL)
+ #endif
+
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+ #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
+ #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
+ #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
+ #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+ #ifndef CONFIG_DISCONTIGMEM
+ #define pfn_to_page(pfn) (mem_map + (pfn))
+ #define page_to_pfn(page) ((unsigned long)((page) - mem_map))
+ #define pfn_valid(pfn) ((pfn) < max_mapnr)
+ #endif /* !CONFIG_DISCONTIGMEM */
+ #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+
+ #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+ #define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+ /* VIRT <-> MACHINE conversion */
+ #define virt_to_machine(_a) (phys_to_machine(__pa(_a)))
+ #define machine_to_virt(_m) (__va(machine_to_phys(_m)))
+
+ #endif /* __KERNEL__ */
+
+ #endif /* _I386_PAGE_H */
--- /dev/null
-#define INVALID_P2M_ENTRY (~0UL)
+ #ifndef _I386_PGTABLE_2LEVEL_H
+ #define _I386_PGTABLE_2LEVEL_H
+
+ #include <asm-generic/pgtable-nopmd.h>
+
+ #define pte_ERROR(e) \
+ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+ #define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+ /*
+ * Certain architectures need to do special things when PTEs
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+ #define set_pte_batched(pteptr, pteval) \
+ queue_l1_entry_update(pteptr, (pteval).pte_low)
++
++#ifdef CONFIG_SMP
++#define set_pte(pteptr, pteval) xen_l1_entry_update(pteptr, (pteval).pte_low)
++#if 0
++do { \
++ (*(pteptr) = pteval); \
++ HYPERVISOR_xen_version(0); \
++} while (0)
++#endif
++#define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval)
++#else
+ #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
+ #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
++#endif
+ #define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
+
+ /*
+ * A note on implementation of this atomic 'get-and-clear' operation.
+ * This is actually very simple because Xen Linux can only run on a single
+ * processor. Therefore, we cannot race other processors setting the 'accessed'
+ * or 'dirty' bits on a page-table entry.
+ * Even if pages are shared between domains, that is not a problem because
+ * each domain will have separate page tables, with their own versions of
+ * accessed & dirty state.
+ */
+ static inline pte_t ptep_get_and_clear(pte_t *xp)
+ {
+ pte_t pte = *xp;
+ if (pte.pte_low)
+ set_pte(xp, __pte_ma(0));
+ return pte;
+ }
+
+ #define pte_same(a, b) ((a).pte_low == (b).pte_low)
+ /*
+ * We detect special mappings in one of two ways:
+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
+ * to be outside our maximum possible pseudophys range.
+ * 2. If the MFN belongs to a different domain then we will certainly
+ * not have MFN in our p2m table. Conversely, if the page is ours,
+ * then we'll have p2m(m2p(MFN))==MFN.
+ * If we detect a special mapping then it doesn't have a 'struct page'.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
+ */
++#define INVALID_P2M_ENTRY (~0U)
+ #define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
+ #define pte_pfn(_pte) \
+ ({ \
+ unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
+ unsigned long pfn = mfn_to_pfn(mfn); \
+ if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
+ pfn = max_mapnr; /* special: force !pfn_valid() */ \
+ pfn; \
+ })
+
+ #define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
+
+ #define pte_none(x) (!(x).pte_low)
+ #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+ #define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+ #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+ #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+ #define pmd_page_kernel(pmd) \
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+ /*
+ * All present user pages are user-executable:
+ */
+ static inline int pte_exec(pte_t pte)
+ {
+ return pte_user(pte);
+ }
+
+ /*
+ * All present pages are kernel-executable:
+ */
+ static inline int pte_exec_kernel(pte_t pte)
+ {
+ return 1;
+ }
+
+ /*
+ * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
+ * into this range:
+ */
+ #define PTE_FILE_MAX_BITS 29
+
+ #define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
+
+ #define pgoff_to_pte(off) \
+ ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
+
+ /* Encode and de-code a swap entry */
+ #define __swp_type(x) (((x).val >> 1) & 0x1f)
+ #define __swp_offset(x) ((x).val >> 8)
+ #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+ #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+ #endif /* _I386_PGTABLE_2LEVEL_H */
--- /dev/null
- HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, (__entry), UVMF_INVLPG); \
+ #ifndef _I386_PGTABLE_H
+ #define _I386_PGTABLE_H
+
+ #include <linux/config.h>
+ #include <asm-xen/hypervisor.h>
+
+ /*
+ * The Linux memory management assumes a three-level page table setup. On
+ * the i386, we use that, but "fold" the mid level into the top-level page
+ * table, so that we physically have the same two-level page table as the
+ * i386 mmu expects.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the i386 page table tree.
+ */
+ #ifndef __ASSEMBLY__
+ #include <asm/processor.h>
+ #include <asm/fixmap.h>
+ #include <linux/threads.h>
+
+ #ifndef _I386_BITOPS_H
+ #include <asm/bitops.h>
+ #endif
+
+ #include <linux/slab.h>
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+
+ /*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+ extern unsigned long empty_zero_page[1024];
+ extern pgd_t swapper_pg_dir[1024];
+ extern kmem_cache_t *pgd_cache;
+ extern kmem_cache_t *pmd_cache;
+ extern kmem_cache_t *pte_cache;
+ extern spinlock_t pgd_lock;
+ extern struct page *pgd_list;
+
+ void pte_ctor(void *, kmem_cache_t *, unsigned long);
+ void pte_dtor(void *, kmem_cache_t *, unsigned long);
+ void pmd_ctor(void *, kmem_cache_t *, unsigned long);
+ void pgd_ctor(void *, kmem_cache_t *, unsigned long);
+ void pgd_dtor(void *, kmem_cache_t *, unsigned long);
+ void pgtable_cache_init(void);
+ void paging_init(void);
+
+ /*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level-defs.h>
+ # define PMD_SIZE (1UL << PMD_SHIFT)
+ # define PMD_MASK (~(PMD_SIZE-1))
+ #else
+ # include <asm/pgtable-2level-defs.h>
+ #endif
+
+ #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+ #define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+ #define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+ #define FIRST_USER_PGD_NR 0
+
+ #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
+ #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
+
+ #define TWOLEVEL_PGDIR_SHIFT 22
+ #define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
+ #define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
+
+ /* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+ #define VMALLOC_OFFSET (8*1024*1024)
+ #define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
+ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
+ #ifdef CONFIG_HIGHMEM
+ # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
+ #else
+ # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
+ #endif
+
+ extern void *high_memory;
+ extern unsigned long vmalloc_earlyreserve;
+
+ /*
+ * The 4MB page is guessing.. Detailed in the infamous "Chapter H"
+ * of the Pentium details, but assuming intel did the straightforward
+ * thing, this bit set in the page directory entry just means that
+ * the page directory entry points directly to a 4MB-aligned block of
+ * memory.
+ */
+ #define _PAGE_BIT_PRESENT 0
+ #define _PAGE_BIT_RW 1
+ #define _PAGE_BIT_USER 2
+ #define _PAGE_BIT_PWT 3
+ #define _PAGE_BIT_PCD 4
+ #define _PAGE_BIT_ACCESSED 5
+ #define _PAGE_BIT_DIRTY 6
+ #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+ #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+ #define _PAGE_BIT_UNUSED1 9 /* available for programmer */
+ #define _PAGE_BIT_UNUSED2 10
+ #define _PAGE_BIT_UNUSED3 11
+ #define _PAGE_BIT_NX 63
+
+ #define _PAGE_PRESENT 0x001
+ #define _PAGE_RW 0x002
+ #define _PAGE_USER 0x004
+ #define _PAGE_PWT 0x008
+ #define _PAGE_PCD 0x010
+ #define _PAGE_ACCESSED 0x020
+ #define _PAGE_DIRTY 0x040
+ #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
+ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
+ #define _PAGE_UNUSED1 0x200 /* available for programmer */
+ #define _PAGE_UNUSED2 0x400
+ #define _PAGE_UNUSED3 0x800
+
+ #define _PAGE_FILE 0x040 /* set:pagecache unset:swap */
+ #define _PAGE_PROTNONE 0x080 /* If not present */
+ #ifdef CONFIG_X86_PAE
+ #define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
+ #else
+ #define _PAGE_NX 0
+ #endif
+
+ #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
+ #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+ #define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+ #define PAGE_NONE \
+ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+ #define PAGE_SHARED \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+
+ #define PAGE_SHARED_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_COPY_NOEXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+ #define PAGE_COPY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_COPY \
+ PAGE_COPY_NOEXEC
+ #define PAGE_READONLY \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+ #define PAGE_READONLY_EXEC \
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
+ #define _PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+ #define _PAGE_KERNEL_EXEC \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
+ #define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
+ #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
+ #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+ #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+ #define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+ #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+ #define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+ #define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+ #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+
+ /*
+ * The i386 can't do page protection for execute: it treats execute
+ * permission the same as read. Also, write permissions imply read permissions.
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+ #define __P001 PAGE_READONLY
+ #define __P010 PAGE_COPY
+ #define __P011 PAGE_COPY
+ #define __P100 PAGE_READONLY_EXEC
+ #define __P101 PAGE_READONLY_EXEC
+ #define __P110 PAGE_COPY_EXEC
+ #define __P111 PAGE_COPY_EXEC
+
+ #define __S000 PAGE_NONE
+ #define __S001 PAGE_READONLY
+ #define __S010 PAGE_SHARED
+ #define __S011 PAGE_SHARED
+ #define __S100 PAGE_READONLY_EXEC
+ #define __S101 PAGE_READONLY_EXEC
+ #define __S110 PAGE_SHARED_EXEC
+ #define __S111 PAGE_SHARED_EXEC
+
+ /*
+ * Define this if things work differently on an i386 and an i486:
+ * it will (on an i486) warn about kernel memory accesses that are
+ * done without a 'verify_area(VERIFY_WRITE,..)'
+ */
+ #undef TEST_VERIFY_AREA
+
+ /* The boot page tables (all created as a single array) */
+ extern unsigned long pg0[];
+
+ #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+ #define pte_clear(xp) do { set_pte(xp, __pte(0)); } while (0)
+
+ #define pmd_none(x) (!pmd_val(x))
+ /* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
+ can temporarily clear it. */
+ #define pmd_present(x) (pmd_val(x))
+ /* pmd_clear below */
+ #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+
+
+ #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
+
+ /*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+ static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+ static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
+ static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
+ static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
+ static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
+
+ /*
+ * The following only works if pte_present() is not true.
+ */
+ static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
+
+ static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+ static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
+ static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
+ static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
+ static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
+ static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+ static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
+ static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
+ static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
+ static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
+
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level.h>
+ #else
+ # include <asm/pgtable-2level.h>
+ #endif
+
+ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+ int ret = pte_dirty(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkclean(pte).pte_low);
+ return ret;
+ }
+
+ static inline int ptep_test_and_clear_young(pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+ int ret = pte_young(pte);
+ if (ret)
+ xen_l1_entry_update(ptep, pte_mkold(pte).pte_low);
+ return ret;
+ }
+
+ static inline void ptep_set_wrprotect(pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+ if (pte_write(pte))
+ set_pte(ptep, pte_wrprotect(pte));
+ }
+ static inline void ptep_mkdirty(pte_t *ptep)
+ {
+ pte_t pte = *ptep;
+ if (!pte_dirty(pte))
+ xen_l1_entry_update(ptep, pte_mkdirty(pte).pte_low);
+ }
+
+ /*
+ * Macro to mark a page protection value as "uncacheable". On processors which do not support
+ * it, this is a no-op.
+ */
+ #define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
+ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
+
+ /*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+ #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+ #define mk_pte_huge(entry) ((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
+
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_low |= pgprot_val(newprot);
+ #ifdef CONFIG_X86_PAE
+ /*
+ * Chop off the NX bit (if present), and add the NX portion of
+ * the newprot (if present):
+ */
+ pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
+ pte.pte_high |= (pgprot_val(newprot) >> 32) & \
+ (__supported_pte_mask >> 32);
+ #endif
+ return pte;
+ }
+
+ #define page_pte(page) page_pte_prot(page, __pgprot(0))
+
+ #define pmd_clear(xp) do { \
+ set_pmd(xp, __pmd(0)); \
+ xen_flush_page_update_queue(); \
+ } while (0)
+
+ #define pmd_large(pmd) \
+ ((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
+
+ /*
+ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+ #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+ #define pgd_index_k(addr) pgd_index(addr)
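+ /* e.g. in the two-level case PGDIR_SHIFT is 22 and PTRS_PER_PGD is 1024,
+ * so pgd_index(PAGE_OFFSET) == 0xC0000000 >> 22 == 768, the first
+ * kernel-space slot (cf. BOOT_USER_PGD_PTRS above). */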
+
+ /*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
+ */
+ #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+ /*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+ #define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+ /*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this macro returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+ #define pmd_index(address) \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+ /*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+ #define pte_index(address) \
+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+ #define pte_offset_kernel(dir, address) \
+ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
+
+ /*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+ extern pte_t *lookup_address(unsigned long address);
+
+ /*
+ * Make a given kernel text page executable/non-executable.
+ * Returns the previous executability setting of that page (which
+ * is used to restore the previous state). Used by the SMP bootup code.
+ * NOTE: this is an __init function for security reasons.
+ */
+ #ifdef CONFIG_X86_PAE
+ extern int set_kernel_exec(unsigned long vaddr, int enable);
+ #else
+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
+ #endif
+
+ extern void noexec_setup(const char *str);
+
+ #if defined(CONFIG_HIGHPTE)
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
+ pte_index(address))
+ #define pte_offset_map_nested(dir, address) \
+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
+ pte_index(address))
+ #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+ #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
+ #else
+ #define pte_offset_map(dir, address) \
+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
+ #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
+ #define pte_unmap(pte) do { } while (0)
+ #define pte_unmap_nested(pte) do { } while (0)
+ #endif
+
+ /*
+ * The i386 doesn't have any external MMU info: the kernel page
+ * tables contain all the necessary information.
+ *
+ * Also, we only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+ #define update_mmu_cache(vma,address,pte) do { } while (0)
+ #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+
+ #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ do { \
+ if (__dirty) { \
+ if ( likely((__vma)->vm_mm == current->mm) ) { \
+ xen_flush_page_update_queue(); \
- HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT, \
++ HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG); \
+ } else { \
+ xen_l1_entry_update((__ptep), (__entry).pte_low); \
+ flush_tlb_page((__vma), (__address)); \
+ } \
+ } \
+ } while (0)
+
+ #define __HAVE_ARCH_PTEP_ESTABLISH
+ #define ptep_establish(__vma, __address, __ptep, __entry) \
+ do { \
+ ptep_set_access_flags(__vma, __address, __ptep, __entry, 1); \
+ } while (0)
+
+ #define __HAVE_ARCH_PTEP_ESTABLISH_NEW
+ #define ptep_establish_new(__vma, __address, __ptep, __entry) \
+ do { \
+ if (likely((__vma)->vm_mm == current->mm)) { \
+ xen_flush_page_update_queue(); \
++ HYPERVISOR_update_va_mapping((__address), \
+ __entry, 0); \
+ } else { \
+ xen_l1_entry_update((__ptep), (__entry).pte_low); \
+ } \
+ } while (0)
+
+ /* NOTE: make_page* callers must call flush_page_update_queue() */
+ void make_lowmem_page_readonly(void *va);
+ void make_lowmem_page_writable(void *va);
+ void make_page_readonly(void *va);
+ void make_page_writable(void *va);
+ void make_pages_readonly(void *va, unsigned int nr);
+ void make_pages_writable(void *va, unsigned int nr);
+
+ #define arbitrary_virt_to_machine(__va) \
+ ({ \
+ pgd_t *__pgd = pgd_offset_k((unsigned long)(__va)); \
+ pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va)); \
+ pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va)); \
+ pte_t *__pte = pte_offset_kernel(__pmd, (unsigned long)(__va)); \
+ unsigned long __pa = (*(unsigned long *)__pte) & PAGE_MASK; \
+ __pa | ((unsigned long)(__va) & (PAGE_SIZE-1)); \
+ })
+
+ #endif /* !__ASSEMBLY__ */
+
+ #ifndef CONFIG_DISCONTIGMEM
+ #define kern_addr_valid(addr) (1)
+ #endif /* !CONFIG_DISCONTIGMEM */
+
+ #define DOMID_LOCAL (0xFFFFU)
+ int direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long machine_addr,
+ unsigned long size,
+ pgprot_t prot,
+ domid_t domid);
+ int __direct_remap_area_pages(struct mm_struct *mm,
+ unsigned long address,
+ unsigned long size,
+ mmu_update_t *v);
+
+ #define io_remap_page_range(vma,from,phys,size,prot) \
+ direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
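direct_remap_area_pages() is what lets a privileged guest map raw machine frames (possibly belonging to another domain) into a process address space. A hedged sketch of how a driver mmap() handler might call it; the handler name, the machine address and the domain id are illustrative, and only direct_remap_area_pages() and its signature come from the declaration above:

/* Sketch only: 'mydev_machine_addr' and 'mydev_peer_domid' are hypothetical
 * values obtained elsewhere (e.g. via the control interface). */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
    unsigned long size = vma->vm_end - vma->vm_start;

    return direct_remap_area_pages(vma->vm_mm,
                                   vma->vm_start,        /* guest virtual start */
                                   mydev_machine_addr,   /* machine address     */
                                   size,
                                   vma->vm_page_prot,
                                   mydev_peer_domid);
}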
+
+ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+ #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+ #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+ #define __HAVE_ARCH_PTEP_SET_WRPROTECT
+ #define __HAVE_ARCH_PTEP_MKDIRTY
+ #define __HAVE_ARCH_PTE_SAME
+ #include <asm-generic/pgtable.h>
+
+ #endif /* _I386_PGTABLE_H */
--- /dev/null
-extern pgd_t *cur_pgd; /* XXXsmp */
+ /*
+ * include/asm-i386/processor.h
+ *
+ * Copyright (C) 1994 Linus Torvalds
+ */
+
+ #ifndef __ASM_I386_PROCESSOR_H
+ #define __ASM_I386_PROCESSOR_H
+
+ #include <asm/vm86.h>
+ #include <asm/math_emu.h>
+ #include <asm/segment.h>
+ #include <asm/page.h>
+ #include <asm/types.h>
+ #include <asm/sigcontext.h>
+ #include <asm/cpufeature.h>
+ #include <asm/msr.h>
+ #include <asm/system.h>
+ #include <linux/cache.h>
+ #include <linux/config.h>
+ #include <linux/threads.h>
+ #include <asm/percpu.h>
+
+ /* flag for disabling the tsc */
+ extern int tsc_disable;
+
+ struct desc_struct {
+ unsigned long a,b;
+ };
+
+ #define desc_empty(desc) \
+ (!((desc)->a + (desc)->b))
+
+ #define desc_equal(desc1, desc2) \
+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+ /*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+ #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
+
+ /*
+ * CPU type and hardware bug flags. Kept separately for each CPU.
+ * Members of this structure are referenced in head.S, so think twice
+ * before touching them. [mj]
+ */
+
+ struct cpuinfo_x86 {
+ __u8 x86; /* CPU family */
+ __u8 x86_vendor; /* CPU vendor */
+ __u8 x86_model;
+ __u8 x86_mask;
+ char wp_works_ok; /* It doesn't on 386's */
+ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
+ char hard_math;
+ char rfu;
+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
+ unsigned long x86_capability[NCAPINTS];
+ char x86_vendor_id[16];
+ char x86_model_id[64];
+ int x86_cache_size; /* in KB - valid for CPUS which support this
+ call */
+ int x86_cache_alignment; /* In bytes */
+ int fdiv_bug;
+ int f00f_bug;
+ int coma_bug;
+ unsigned long loops_per_jiffy;
+ unsigned char x86_num_cores;
+ } __attribute__((__aligned__(SMP_CACHE_BYTES)));
+
+ #define X86_VENDOR_INTEL 0
+ #define X86_VENDOR_CYRIX 1
+ #define X86_VENDOR_AMD 2
+ #define X86_VENDOR_UMC 3
+ #define X86_VENDOR_NEXGEN 4
+ #define X86_VENDOR_CENTAUR 5
+ #define X86_VENDOR_RISE 6
+ #define X86_VENDOR_TRANSMETA 7
+ #define X86_VENDOR_NSC 8
+ #define X86_VENDOR_NUM 9
+ #define X86_VENDOR_UNKNOWN 0xff
+
+ /*
+ * capabilities of CPUs
+ */
+
+ extern struct cpuinfo_x86 boot_cpu_data;
+ extern struct cpuinfo_x86 new_cpu_data;
+ extern struct tss_struct doublefault_tss;
+ DECLARE_PER_CPU(struct tss_struct, init_tss);
- cur_pgd = pgdir; /* XXXsmp */ \
++DECLARE_PER_CPU(pgd_t *, cur_pgd);
+
+ #ifdef CONFIG_SMP
+ extern struct cpuinfo_x86 cpu_data[];
+ #define current_cpu_data cpu_data[smp_processor_id()]
+ #else
+ #define cpu_data (&boot_cpu_data)
+ #define current_cpu_data boot_cpu_data
+ #endif
+
+ extern int phys_proc_id[NR_CPUS];
+ extern char ignore_fpu_irq;
+
+ extern void identify_cpu(struct cpuinfo_x86 *);
+ extern void print_cpu_info(struct cpuinfo_x86 *);
+ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+ extern void dodgy_tsc(void);
+
+ #ifdef CONFIG_X86_HT
+ extern void detect_ht(struct cpuinfo_x86 *c);
+ #else
+ static inline void detect_ht(struct cpuinfo_x86 *c) {}
+ #endif
+
+ /*
+ * EFLAGS bits
+ */
+ #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
+ #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
+ #define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
+ #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
+ #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
+ #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
+ #define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
+ #define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
+ #define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
+ #define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
+ #define X86_EFLAGS_NT 0x00004000 /* Nested Task */
+ #define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
+ #define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
+ #define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
+ #define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
+ #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
+ #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
+
+ /*
+ * Generic CPUID function
+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
+ * resulting in stale register contents being returned.
+ */
+ static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
+ {
+ __asm__("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (op), "c"(0));
+ }
+
+ /*
+ * CPUID functions returning a single datum
+ */
+ static inline unsigned int cpuid_eax(unsigned int op)
+ {
+ unsigned int eax;
+
+ __asm__("cpuid"
+ : "=a" (eax)
+ : "0" (op)
+ : "bx", "cx", "dx");
+ return eax;
+ }
+ static inline unsigned int cpuid_ebx(unsigned int op)
+ {
+ unsigned int eax, ebx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=b" (ebx)
+ : "0" (op)
+ : "cx", "dx" );
+ return ebx;
+ }
+ static inline unsigned int cpuid_ecx(unsigned int op)
+ {
+ unsigned int eax, ecx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=c" (ecx)
+ : "0" (op)
+ : "bx", "dx" );
+ return ecx;
+ }
+ static inline unsigned int cpuid_edx(unsigned int op)
+ {
+ unsigned int eax, edx;
+
+ __asm__("cpuid"
+ : "=a" (eax), "=d" (edx)
+ : "0" (op)
+ : "bx", "cx");
+ return edx;
+ }
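Leaf 0 of CPUID returns the maximum basic leaf in EAX and the vendor string in EBX, EDX, ECX, so the helpers above are enough to identify the processor. A standalone sketch using the same inline-asm pattern as cpuid() above:

#include <stdio.h>
#include <string.h>

static inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
{
    __asm__("cpuid"
            : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
            : "0" (op), "c" (0));
}

int main(void)
{
    int eax, ebx, ecx, edx;
    char vendor[13];

    cpuid(0, &eax, &ebx, &ecx, &edx);   /* leaf 0: max level + vendor id      */
    memcpy(vendor + 0, &ebx, 4);        /* vendor string order: EBX, EDX, ECX */
    memcpy(vendor + 4, &edx, 4);
    memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';

    printf("max basic leaf %d, vendor \"%s\"\n", eax, vendor);
    return 0;
}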
+
+ #define load_cr3(pgdir) do { \
+ queue_pt_switch(__pa(pgdir)); \
++ per_cpu(cur_pgd, smp_processor_id()) = pgdir; \
+ } while (/* CONSTCOND */0)
+
+
+ /*
+ * Intel CPU features in CR4
+ */
+ #define X86_CR4_VME 0x0001 /* enable vm86 extensions */
+ #define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
+ #define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
+ #define X86_CR4_DE 0x0008 /* enable debugging extensions */
+ #define X86_CR4_PSE 0x0010 /* enable page size extensions */
+ #define X86_CR4_PAE 0x0020 /* enable physical address extensions */
+ #define X86_CR4_MCE 0x0040 /* Machine check enable */
+ #define X86_CR4_PGE 0x0080 /* enable global pages */
+ #define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
+ #define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
+ #define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
+
+ /*
+ * Save the cr4 feature set we're using (ie
+ * Pentium 4MB enable and PPro Global page
+ * enable), so that any CPU's that boot up
+ * after us can get the correct flags.
+ */
+ extern unsigned long mmu_cr4_features;
+
+ static inline void set_in_cr4 (unsigned long mask)
+ {
+ mmu_cr4_features |= mask;
+ switch (mask) {
+ case X86_CR4_OSFXSR:
+ case X86_CR4_OSXMMEXCPT:
+ break;
+ default:
+ do {
+ const char *msg = "Xen unsupported cr4 update\n";
+ (void)HYPERVISOR_console_io(
+ CONSOLEIO_write, __builtin_strlen(msg),
+ (char *)msg);
+ BUG();
+ } while (0);
+ }
+ }
+
+ static inline void clear_in_cr4 (unsigned long mask)
+ {
+ mmu_cr4_features &= ~mask;
+ __asm__("movl %%cr4,%%eax\n\t"
+ "andl %0,%%eax\n\t"
+ "movl %%eax,%%cr4\n"
+ : : "irg" (~mask)
+ :"ax");
+ }
+
+ /*
+ * NSC/Cyrix CPU configuration register indexes
+ */
+
+ #define CX86_PCR0 0x20
+ #define CX86_GCR 0xb8
+ #define CX86_CCR0 0xc0
+ #define CX86_CCR1 0xc1
+ #define CX86_CCR2 0xc2
+ #define CX86_CCR3 0xc3
+ #define CX86_CCR4 0xe8
+ #define CX86_CCR5 0xe9
+ #define CX86_CCR6 0xea
+ #define CX86_CCR7 0xeb
+ #define CX86_PCR1 0xf0
+ #define CX86_DIR0 0xfe
+ #define CX86_DIR1 0xff
+ #define CX86_ARR_BASE 0xc4
+ #define CX86_RCR_BASE 0xdc
+
+ /*
+ * NSC/Cyrix CPU indexed register access macros
+ */
+
+ #define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
+
+ #define setCx86(reg, data) do { \
+ outb((reg), 0x22); \
+ outb((data), 0x23); \
+ } while (0)
+
+ static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+ {
+ /* "monitor %eax,%ecx,%edx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc8;"
+ : :"a" (eax), "c" (ecx), "d"(edx));
+ }
+
+ static inline void __mwait(unsigned long eax, unsigned long ecx)
+ {
+ /* "mwait %eax,%ecx;" */
+ asm volatile(
+ ".byte 0x0f,0x01,0xc9;"
+ : :"a" (eax), "c" (ecx));
+ }
+
+ /* from system description table in BIOS. Mostly for MCA use, but
+ others may find it useful. */
+ extern unsigned int machine_id;
+ extern unsigned int machine_submodel_id;
+ extern unsigned int BIOS_revision;
+ extern unsigned int mca_pentium_flag;
+
+ /* Boot loader type from the setup header */
+ extern int bootloader_type;
+
+ /*
+ * User space process size: 3GB (default).
+ */
+ #define TASK_SIZE (PAGE_OFFSET)
+
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
+
+ #define HAVE_ARCH_PICK_MMAP_LAYOUT
+
+ /*
+ * Size of io_bitmap.
+ */
+ #define IO_BITMAP_BITS 65536
+ #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
+ #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
+ #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
+ #define INVALID_IO_BITMAP_OFFSET 0x8000
+ #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
+
+ struct i387_fsave_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ long status; /* software status information */
+ };
+
+ struct i387_fxsave_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd;
+ unsigned short fop;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long mxcsr;
+ long mxcsr_mask;
+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ long padding[56];
+ } __attribute__ ((aligned (16)));
+
+ struct i387_soft_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
+ struct info *info;
+ unsigned long entry_eip;
+ };
+
+ union i387_union {
+ struct i387_fsave_struct fsave;
+ struct i387_fxsave_struct fxsave;
+ struct i387_soft_struct soft;
+ };
+
+ typedef struct {
+ unsigned long seg;
+ } mm_segment_t;
+
+ struct thread_struct;
+
+ struct tss_struct {
+ unsigned short back_link,__blh;
+ unsigned long esp0;
+ unsigned short ss0,__ss0h;
+ unsigned long esp1;
+ unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
+ unsigned long esp2;
+ unsigned short ss2,__ss2h;
+ unsigned long __cr3;
+ unsigned long eip;
+ unsigned long eflags;
+ unsigned long eax,ecx,edx,ebx;
+ unsigned long esp;
+ unsigned long ebp;
+ unsigned long esi;
+ unsigned long edi;
+ unsigned short es, __esh;
+ unsigned short cs, __csh;
+ unsigned short ss, __ssh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+ unsigned short ldt, __ldth;
+ unsigned short trace, io_bitmap_base;
+ /*
+ * The extra 1 is there because the CPU will access an
+ * additional byte beyond the end of the IO permission
+ * bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
+ /*
+ * Cache the current maximum and the last task that used the bitmap:
+ */
+ unsigned long io_bitmap_max;
+ struct thread_struct *io_bitmap_owner;
+ /*
+ * pads the TSS to be cacheline-aligned (size is 0x100)
+ */
+ unsigned long __cacheline_filler[35];
+ /*
+ * .. and then another 0x100 bytes for emergency kernel stack
+ */
+ unsigned long stack[64];
+ } __attribute__((packed));
+
+ #define ARCH_MIN_TASKALIGN 16
+
+ struct thread_struct {
+ /* cached TLS descriptors. */
+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
+ unsigned long esp0;
+ unsigned long sysenter_cs;
+ unsigned long eip;
+ unsigned long esp;
+ unsigned long fs;
+ unsigned long gs;
+ unsigned int io_pl;
+ /* Hardware debugging registers */
+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
+ /* fault info */
+ unsigned long cr2, trap_no, error_code;
+ /* floating point info */
+ union i387_union i387;
+ /* virtual 86 mode info */
+ struct vm86_struct __user * vm86_info;
+ unsigned long screen_bitmap;
+ unsigned long v86flags, v86mask, saved_esp0;
+ unsigned int saved_fs, saved_gs;
+ /* IO permissions */
+ unsigned long *io_bitmap_ptr;
+ /* max allowed port in the bitmap, in bytes: */
+ unsigned long io_bitmap_max;
+ };
+
+ #define INIT_THREAD { \
+ .vm86_info = NULL, \
+ .sysenter_cs = __KERNEL_CS, \
+ .io_bitmap_ptr = NULL, \
+ }
+
+ /*
+ * Note that the .io_bitmap member must be extra-big. This is because
+ * the CPU will access an additional byte beyond the end of the IO
+ * permission bitmap. The extra byte must be all 1 bits, and must
+ * be within the limit.
+ */
+ #define INIT_TSS { \
+ .esp0 = sizeof(init_stack) + (long)&init_stack, \
+ .ss0 = __KERNEL_DS, \
+ .ss1 = __KERNEL_CS, \
+ .ldt = GDT_ENTRY_LDT, \
+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
+ }
+
+ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+ {
+ tss->esp0 = thread->esp0;
+ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
+ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+ tss->ss1 = thread->sysenter_cs;
+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+ }
+ HYPERVISOR_stack_switch(tss->ss0, tss->esp0);
+ }
+
+ #define start_thread(regs, new_eip, new_esp) do { \
+ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
+ set_fs(USER_DS); \
+ regs->xds = __USER_DS; \
+ regs->xes = __USER_DS; \
+ regs->xss = __USER_DS; \
+ regs->xcs = __USER_CS; \
+ regs->eip = new_eip; \
+ regs->esp = new_esp; \
+ } while (0)
+
+ /* Forward declaration, a strange C thing */
+ struct task_struct;
+ struct mm_struct;
+
+ /* Free all resources held by a thread. */
+ extern void release_thread(struct task_struct *);
+
+ /* Prepare to copy thread state - unlazy all lazy status */
+ extern void prepare_to_copy(struct task_struct *tsk);
+
+ /*
+ * create a kernel thread without removing it from tasklists
+ */
+ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
+
+ extern unsigned long thread_saved_pc(struct task_struct *tsk);
+ void show_trace(struct task_struct *task, unsigned long *stack);
+
+ unsigned long get_wchan(struct task_struct *p);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+ #define KSTK_TOP(info) \
+ ({ \
+ unsigned long *__ptr = (unsigned long *)(info); \
+ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+ })
+
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+ __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
+ __regs__ - 1; \
+ })
+
+ #define KSTK_EIP(task) (task_pt_regs(task)->eip)
+ #define KSTK_ESP(task) (task_pt_regs(task)->esp)
+
+
+ struct microcode_header {
+ unsigned int hdrver;
+ unsigned int rev;
+ unsigned int date;
+ unsigned int sig;
+ unsigned int cksum;
+ unsigned int ldrver;
+ unsigned int pf;
+ unsigned int datasize;
+ unsigned int totalsize;
+ unsigned int reserved[3];
+ };
+
+ struct microcode {
+ struct microcode_header hdr;
+ unsigned int bits[0];
+ };
+
+ typedef struct microcode microcode_t;
+ typedef struct microcode_header microcode_header_t;
+
+ /* microcode format is extended from prescott processors */
+ struct extended_signature {
+ unsigned int sig;
+ unsigned int pf;
+ unsigned int cksum;
+ };
+
+ struct extended_sigtable {
+ unsigned int count;
+ unsigned int cksum;
+ unsigned int reserved[3];
+ struct extended_signature sigs[0];
+ };
+ /* '6' because it used to be for P6 only (but now covers Pentium 4 as well) */
+ #define MICROCODE_IOCFREE _IO('6',0)
+
+ /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+ static inline void rep_nop(void)
+ {
+ __asm__ __volatile__("rep;nop": : :"memory");
+ }
+
+ #define cpu_relax() rep_nop()
+
+ /* generic versions from gas */
+ #define GENERIC_NOP1 ".byte 0x90\n"
+ #define GENERIC_NOP2 ".byte 0x89,0xf6\n"
+ #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
+ #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
+ #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
+ #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
+ #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
+ #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
+
+ /* Opteron nops */
+ #define K8_NOP1 GENERIC_NOP1
+ #define K8_NOP2 ".byte 0x66,0x90\n"
+ #define K8_NOP3 ".byte 0x66,0x66,0x90\n"
+ #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
+ #define K8_NOP5 K8_NOP3 K8_NOP2
+ #define K8_NOP6 K8_NOP3 K8_NOP3
+ #define K8_NOP7 K8_NOP4 K8_NOP3
+ #define K8_NOP8 K8_NOP4 K8_NOP4
+
+ /* K7 nops */
+ /* uses eax dependencies (arbitrary choice) */
+ #define K7_NOP1 GENERIC_NOP1
+ #define K7_NOP2 ".byte 0x8b,0xc0\n"
+ #define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
+ #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
+ #define K7_NOP5 K7_NOP4 ASM_NOP1
+ #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
+ #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
+ #define K7_NOP8 K7_NOP7 ASM_NOP1
+
+ #ifdef CONFIG_MK8
+ #define ASM_NOP1 K8_NOP1
+ #define ASM_NOP2 K8_NOP2
+ #define ASM_NOP3 K8_NOP3
+ #define ASM_NOP4 K8_NOP4
+ #define ASM_NOP5 K8_NOP5
+ #define ASM_NOP6 K8_NOP6
+ #define ASM_NOP7 K8_NOP7
+ #define ASM_NOP8 K8_NOP8
+ #elif defined(CONFIG_MK7)
+ #define ASM_NOP1 K7_NOP1
+ #define ASM_NOP2 K7_NOP2
+ #define ASM_NOP3 K7_NOP3
+ #define ASM_NOP4 K7_NOP4
+ #define ASM_NOP5 K7_NOP5
+ #define ASM_NOP6 K7_NOP6
+ #define ASM_NOP7 K7_NOP7
+ #define ASM_NOP8 K7_NOP8
+ #else
+ #define ASM_NOP1 GENERIC_NOP1
+ #define ASM_NOP2 GENERIC_NOP2
+ #define ASM_NOP3 GENERIC_NOP3
+ #define ASM_NOP4 GENERIC_NOP4
+ #define ASM_NOP5 GENERIC_NOP5
+ #define ASM_NOP6 GENERIC_NOP6
+ #define ASM_NOP7 GENERIC_NOP7
+ #define ASM_NOP8 GENERIC_NOP8
+ #endif
+
+ #define ASM_NOP_MAX 8
+
+ /* Prefetch instructions for Pentium III and AMD Athlon */
+ /* It's not worth caring about 3dnow! prefetches for the K6
+ because they are microcoded there and very slow.
+ However, we don't do prefetches for pre-XP Athlons currently.
+ That should be fixed. */
+ #define ARCH_HAS_PREFETCH
+ extern inline void prefetch(const void *x)
+ {
+ alternative_input(ASM_NOP4,
+ "prefetchnta (%1)",
+ X86_FEATURE_XMM,
+ "r" (x));
+ }
+
+ #define ARCH_HAS_PREFETCH
+ #define ARCH_HAS_PREFETCHW
+ #define ARCH_HAS_SPINLOCK_PREFETCH
+
+ /* 3dnow! prefetch to get an exclusive cache line. Useful for
+ spinlocks to avoid one state transition in the cache coherency protocol. */
+ extern inline void prefetchw(const void *x)
+ {
+ alternative_input(ASM_NOP4,
+ "prefetchw (%1)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+ }
+ #define spin_lock_prefetch(x) prefetchw(x)
+
+ extern void select_idle_routine(const struct cpuinfo_x86 *c);
+
+ #define cache_line_size() (boot_cpu_data.x86_cache_alignment)
+
+ extern unsigned long boot_option_idle_override;
+
+ #endif /* __ASM_I386_PROCESSOR_H */
--- /dev/null
-#define __BOOT_CS FLAT_GUESTOS_CS
+ #ifndef _ASM_SEGMENT_H
+ #define _ASM_SEGMENT_H
+
+ /*
+ * The layout of the per-CPU GDT under Linux:
+ *
+ * 0 - null
+ * 1 - reserved
+ * 2 - reserved
+ * 3 - reserved
+ *
+ * 4 - unused <==== new cacheline
+ * 5 - unused
+ *
+ * ------- start of TLS (Thread-Local Storage) segments:
+ *
+ * 6 - TLS segment #1 [ glibc's TLS segment ]
+ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
+ * 8 - TLS segment #3
+ * 9 - reserved
+ * 10 - reserved
+ * 11 - reserved
+ *
+ * ------- start of kernel segments:
+ *
+ * 12 - kernel code segment <==== new cacheline
+ * 13 - kernel data segment
+ * 14 - default user CS
+ * 15 - default user DS
+ * 16 - TSS
+ * 17 - LDT
+ * 18 - PNPBIOS support (16->32 gate)
+ * 19 - PNPBIOS support
+ * 20 - PNPBIOS support
+ * 21 - PNPBIOS support
+ * 22 - PNPBIOS support
+ * 23 - APM BIOS support
+ * 24 - APM BIOS support
+ * 25 - APM BIOS support
+ *
+ * 26 - unused
+ * 27 - unused
+ * 28 - unused
+ * 29 - unused
+ * 30 - unused
+ * 31 - TSS for double fault handler
+ */
+ #define GDT_ENTRY_TLS_ENTRIES 3
+ #define GDT_ENTRY_TLS_MIN 6
+ #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
+
+ #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
+
+ #define GDT_ENTRY_DEFAULT_USER_CS 14
+ #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
+
+ #define GDT_ENTRY_DEFAULT_USER_DS 15
+ #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
+
+ #define GDT_ENTRY_KERNEL_BASE 12
+
+ #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
+ #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8 + 1)
+
+ #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
+ #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8 + 1)
+
+ #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
+ #define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
+
+ #define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
+ #define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
+
+ #define GDT_ENTRY_DOUBLEFAULT_TSS 31
+
+ /*
+ * The GDT has LAST_RESERVED_GDT_ENTRY + 1 entries
+ */
+ #define GDT_ENTRIES (LAST_RESERVED_GDT_ENTRY + 1)
+
+ #define GDT_SIZE (GDT_ENTRIES * 8)
+
+ /* Simple and small GDT entries for booting only */
+
-#define __BOOT_DS FLAT_GUESTOS_DS
++#define __BOOT_CS FLAT_KERNEL_CS
+
++#define __BOOT_DS FLAT_KERNEL_DS
+
+ /*
+ * The interrupt descriptor table has room for 256 idt's,
+ * the global descriptor table is dependent on the number
+ * of tasks we can have..
+ */
+ #define IDT_ENTRIES 256
+
+ #endif
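A segment selector is just the GDT index shifted left by three, with the requested privilege level in the low two bits; the +1 on the kernel selectors reflects the fact that a paravirtualized Xen guest kernel runs in ring 1, while user segments keep RPL 3. A tiny worked example of the arithmetic:

#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE      12
#define GDT_ENTRY_KERNEL_CS        (GDT_ENTRY_KERNEL_BASE + 0)
#define GDT_ENTRY_DEFAULT_USER_CS  14

int main(void)
{
    /* selector = (index << 3) | RPL */
    printf("__KERNEL_CS = 0x%x\n", GDT_ENTRY_KERNEL_CS * 8 + 1);        /* 0x61 */
    printf("__USER_CS   = 0x%x\n", GDT_ENTRY_DEFAULT_USER_CS * 8 + 3);  /* 0x73 */
    return 0;
}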
--- /dev/null
-#include <asm-xen/evtchn.h>
+ #ifndef __ASM_SYSTEM_H
+ #define __ASM_SYSTEM_H
+
+ #include <linux/config.h>
+ #include <linux/kernel.h>
+ #include <linux/bitops.h>
+ #include <asm/synch_bitops.h>
+ #include <asm/segment.h>
+ #include <asm/cpufeature.h>
+ #include <asm-xen/hypervisor.h>
-/* NB. 'clts' is done for us by Xen during virtual trap. */
-#define clts() ((void)0)
+
+ #ifdef __KERNEL__
+
+ struct task_struct; /* one of the stranger aspects of C forward declarations.. */
+ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+
+ #define switch_to(prev,next,last) do { \
+ unsigned long esi,edi; \
+ asm volatile("pushfl\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl %%esp,%0\n\t" /* save ESP */ \
+ "movl %5,%%esp\n\t" /* restore ESP */ \
+ "movl $1f,%1\n\t" /* save EIP */ \
+ "pushl %6\n\t" /* restore EIP */ \
+ "jmp __switch_to\n" \
+ "1:\t" \
+ "popl %%ebp\n\t" \
+ "popfl" \
+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
+ "=a" (last),"=S" (esi),"=D" (edi) \
+ :"m" (next->thread.esp),"m" (next->thread.eip), \
+ "2" (prev), "d" (next)); \
+ } while (0)
+
+ #define _set_base(addr,base) do { unsigned long __pr; \
+ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%2\n\t" \
+ "movb %%dh,%3" \
+ :"=&d" (__pr) \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "0" (base) \
+ ); } while(0)
+
+ #define _set_limit(addr,limit) do { unsigned long __lr; \
+ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %2,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%2" \
+ :"=&d" (__lr) \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "0" (limit) \
+ ); } while(0)
+
+ #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
+ #define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
+
+ static inline unsigned long _get_base(char * addr)
+ {
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+ }
+
+ #define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+ /*
+ * Load a segment. Fall back on loading the zero
+ * segment if something goes wrong..
+ */
+ #define loadsegment(seg,value) \
+ asm volatile("\n" \
+ "1:\t" \
+ "movl %0,%%" #seg "\n" \
+ "2:\n" \
+ ".section .fixup,\"ax\"\n" \
+ "3:\t" \
+ "pushl $0\n\t" \
+ "popl %%" #seg "\n\t" \
+ "jmp 2b\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n\t" \
+ ".align 4\n\t" \
+ ".long 1b,3b\n" \
+ ".previous" \
+ : :"m" (*(unsigned int *)&(value)))
+
+ /*
+ * Save a segment register away
+ */
+ #define savesegment(seg, value) \
+ asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+
+ /*
+ * Clear and set 'TS' bit respectively
+ */
-
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
+ #define read_cr0() \
+ BUG();
+ #define write_cr0(x) \
+ BUG();
-#define stts() (HYPERVISOR_fpu_taskswitch())
+ #define read_cr4() \
+ BUG();
+ #define write_cr4(x) \
+ BUG();
-static inline void wbinvd(void)
-{
- mmu_update_t u;
- u.ptr = MMU_EXTENDED_COMMAND;
- u.val = MMUEXT_FLUSH_CACHE;
- (void)HYPERVISOR_mmu_update(&u, 1, NULL);
-}
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
+
+ #endif /* __KERNEL__ */
+
- * critical operations are executed. All critical operatiosn must complete
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory");
+
+ static inline unsigned long get_limit(unsigned long segment)
+ {
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+ }
+
+ #define nop() __asm__ __volatile__ ("nop")
+
+ #define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+
+ #define tas(ptr) (xchg((ptr),1))
+
+ struct __xchg_dummy { unsigned long a[100]; };
+ #define __xg(x) ((struct __xchg_dummy *)(x))
+
+
+ /*
+ * The semantics of CMPXCHG8B are a bit strange; this is why
+ * there is a loop and the loading of %%eax and %%edx has to
+ * be inside. This inlines well in most cases, the cached
+ * cost is around ~38 cycles. (in the future we might want
+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
+ * might have an implicit FPU-save as a cost, so it's not
+ * clear which path to go.)
+ *
+ * cmpxchg8b must be used with the lock prefix here to allow
+ * the instruction to be executed atomically, see page 3-102
+ * of the instruction set reference 24319102.pdf. We need
+ * the reader side to see the coherent 64bit value.
+ */
+ static inline void __set_64bit (unsigned long long * ptr,
+ unsigned int low, unsigned int high)
+ {
+ __asm__ __volatile__ (
+ "\n1:\t"
+ "movl (%0), %%eax\n\t"
+ "movl 4(%0), %%edx\n\t"
+ "lock cmpxchg8b (%0)\n\t"
+ "jnz 1b"
+ : /* no outputs */
+ : "D"(ptr),
+ "b"(low),
+ "c"(high)
+ : "ax","dx","memory");
+ }
+
+ static inline void __set_64bit_constant (unsigned long long *ptr,
+ unsigned long long value)
+ {
+ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+ }
+ #define ll_low(x) *(((unsigned int*)&(x))+0)
+ #define ll_high(x) *(((unsigned int*)&(x))+1)
+
+ static inline void __set_64bit_var (unsigned long long *ptr,
+ unsigned long long value)
+ {
+ __set_64bit(ptr,ll_low(value), ll_high(value));
+ }
+
+ #define set_64bit(ptr,value) \
+ (__builtin_constant_p(value) ? \
+ __set_64bit_constant(ptr, value) : \
+ __set_64bit_var(ptr, value) )
+
+ #define _set_64bit(ptr,value) \
+ (__builtin_constant_p(value) ? \
+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
+
+ /*
+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
+ * but generally the primitive is invalid, *ptr is output argument. --ANK
+ */
+ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+ {
+ switch (size) {
+ case 1:
+ __asm__ __volatile__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__ __volatile__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__ __volatile__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+ }
+
+ /*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+ #ifdef CONFIG_X86_CMPXCHG
+ #define __HAVE_ARCH_CMPXCHG 1
+ #endif
+
+ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+ {
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+ }
+
+ #define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
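cmpxchg() is normally wrapped in a read-modify-write retry loop: snapshot the old value, compute the new one, and retry if another CPU got there first. A minimal userspace sketch of that pattern, using the GCC __sync_val_compare_and_swap() builtin as a stand-in for the cmpxchg() defined above:

#include <stdio.h>

static unsigned long counter;

static void atomic_add_ulong(unsigned long *p, unsigned long delta)
{
    unsigned long old, seen;

    do {
        old  = *p;                                       /* snapshot             */
        seen = __sync_val_compare_and_swap(p, old, old + delta);
    } while (seen != old);                               /* lost the race: retry */
}

int main(void)
{
    atomic_add_ulong(&counter, 5);
    atomic_add_ulong(&counter, 7);
    printf("counter = %lu\n", counter);                  /* prints 12            */
    return 0;
}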
+
+ #ifdef __KERNEL__
+ struct alt_instr {
+ __u8 *instr; /* original instruction */
+ __u8 *replacement;
+ __u8 cpuid; /* cpuid bit set for replacement */
+ __u8 instrlen; /* length of original instruction */
+ __u8 replacementlen; /* length of new instruction, <= instrlen */
+ __u8 pad;
+ };
+ #endif
+
+ /*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows the use of optimized instructions even on generic binary
+ * kernels.
+ *
+ * The length of oldinstr must be greater than or equal to the length of
+ * newinstr. It can be padded with nops as needed.
+ *
+ * For non-barrier-like inlines please define new variants
+ * without volatile and memory clobber.
+ */
+ #define alternative(oldinstr, newinstr, feature) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature) : "memory")
+
+ /*
+ * Alternative inline assembly with input.
+ *
+ * Peculiarities:
+ * No memory clobber here.
+ * Argument numbers start with 1.
+ * Best is to use constraints that are fixed size (like (%1) ... "r")
+ * If you use variable sized constraints like "m" or "g" in the
+ * replacement make sure to pad to the worst case length.
+ */
+ #define alternative_input(oldinstr, newinstr, feature, input...) \
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+ " .long 661b\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+ " .byte 664f-663f\n" /* replacementlen */ \
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+ ".previous" :: "i" (feature), ##input)
+
+ /*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * Intel CPU's follow what Intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future Intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
+ *
+ * Some non intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+
+
+ /*
+ * Actually only lfence would be needed for mb() because all stores done
+ * by the kernel should be already ordered. But keep a full barrier for now.
+ */
+
+ #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+ #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+
+ /**
+ * read_barrier_depends - Flush all pending reads that subsequent reads
+ * depend on.
+ *
+ * No data-dependent reads from memory-like regions are ever reordered
+ * over this barrier. All reads preceding this primitive are guaranteed
+ * to access memory (but not necessarily other CPUs' caches) before any
+ * reads following this primitive that depend on the data returned by
+ * any of the preceding reads. This primitive is much lighter weight than
+ * rmb() on most CPUs, and is never heavier weight than is
+ * rmb().
+ *
+ * These ordering constraints are respected by both the local CPU
+ * and the compiler.
+ *
+ * Ordering is not guaranteed by anything other than these primitives,
+ * not even by data dependencies. See the documentation for
+ * memory_barrier() for examples and URLs to more information.
+ *
+ * For example, the following code would force ordering (the initial
+ * value of "a" is zero, "b" is one, and "p" is "&a"):
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * b = 2;
+ * memory_barrier();
+ * p = &b; q = p;
+ * read_barrier_depends();
+ * d = *q;
+ * </programlisting>
+ *
+ * because the read of "*q" depends on the read of "p" and these
+ * two reads are separated by a read_barrier_depends(). However,
+ * the following code, with the same initial values for "a" and "b":
+ *
+ * <programlisting>
+ * CPU 0 CPU 1
+ *
+ * a = 2;
+ * memory_barrier();
+ * b = 3; y = b;
+ * read_barrier_depends();
+ * x = a;
+ * </programlisting>
+ *
+ * does not enforce ordering, since there is no data dependency between
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
+ * in cases like this where there are no data dependencies.
+ **/
+
+ #define read_barrier_depends() do { } while(0)
+
+ #ifdef CONFIG_X86_OOSTORE
+ /* Actually there are no OOO store capable CPUs for now that do SSE,
+ but make it a possibility already. */
+ #define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+ #else
+ #define wmb() __asm__ __volatile__ ("": : :"memory")
+ #endif
+
+ #ifdef CONFIG_SMP
+ #define smp_mb() mb()
+ #define smp_rmb() rmb()
+ #define smp_wmb() wmb()
+ #define smp_read_barrier_depends() read_barrier_depends()
+ #define set_mb(var, value) do { xchg(&var, value); } while (0)
+ #else
+ #define smp_mb() barrier()
+ #define smp_rmb() barrier()
+ #define smp_wmb() barrier()
+ #define smp_read_barrier_depends() do { } while(0)
+ #define set_mb(var, value) do { var = value; barrier(); } while (0)
+ #endif
+
+ #define set_wmb(var, value) do { var = value; wmb(); } while (0)
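The canonical use of the SMP barrier pair is publishing data behind a flag: the writer orders the data store before the flag store with smp_wmb(), and the reader orders the flag load before the data load with smp_rmb(). A sketch only; 'shared_data' and 'data_ready' are illustrative globals, not part of this header:

static int shared_data;
static int data_ready;

static void producer(int value)
{
    shared_data = value;
    smp_wmb();              /* data store is visible before the flag store */
    data_ready = 1;
}

static int consumer(void)
{
    if (!data_ready)
        return -1;          /* nothing published yet */
    smp_rmb();              /* flag load is ordered before the data load   */
    return shared_data;
}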
+
+ /* interrupt control.. */
+
+ /*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
++ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+ #define __cli() \
+ do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
++ vcpu_info_t *_vcpu; \
++ preempt_disable(); \
++ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
++ _vcpu->evtchn_upcall_mask = 1; \
++ preempt_enable_no_resched(); \
+ barrier(); \
+ } while (0)
+
+ #define __sti() \
+ do { \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
++ vcpu_info_t *_vcpu; \
+ barrier(); \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
++ preempt_disable(); \
++ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
++ _vcpu->evtchn_upcall_mask = 0; \
+ barrier(); /* unmask then check (avoid races) */ \
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++ preempt_enable(); \
+ } while (0)
+
+ #define __save_flags(x) \
+ do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
++ vcpu_info_t *_vcpu; \
++ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
++ (x) = _vcpu->evtchn_upcall_mask; \
+ } while (0)
+
+ #define __restore_flags(x) \
+ do { \
- if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) { \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
- } \
++ vcpu_info_t *_vcpu; \
+ barrier(); \
-#define safe_halt() ((void)0)
++ preempt_disable(); \
++ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
++ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */ \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++ preempt_enable(); \
++ } else \
++ preempt_enable_no_resched(); \
+ } while (0)
+
- (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask; \
- HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1; \
++#define safe_halt() ((void)0)
+
+ #define __save_and_cli(x) \
+ do { \
-#define __save_and_sti(x) \
-do { \
- shared_info_t *_shared = HYPERVISOR_shared_info; \
- barrier(); \
- (x) = _shared->vcpu_data[0].evtchn_upcall_mask; \
- _shared->vcpu_data[0].evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) ) \
- force_evtchn_callback(); \
-} while (0)
-
++ vcpu_info_t *_vcpu; \
++ preempt_disable(); \
++ _vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()]; \
++ (x) = _vcpu->evtchn_upcall_mask; \
++ _vcpu->evtchn_upcall_mask = 1; \
++ preempt_enable_no_resched(); \
+ barrier(); \
+ } while (0)
+
-#define irqs_disabled() HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
+ #define local_irq_save(x) __save_and_cli(x)
+ #define local_irq_restore(x) __restore_flags(x)
+ #define local_save_flags(x) __save_flags(x)
+ #define local_irq_disable() __cli()
+ #define local_irq_enable() __sti()
+
++#define irqs_disabled() \
++ HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask
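From a driver's point of view the usual idiom is unchanged from native Linux; only the mechanism differs, since local_irq_save()/local_irq_restore() now toggle the per-VCPU evtchn_upcall_mask instead of EFLAGS.IF. A brief sketch ('my_update' is illustrative):

static void my_update(unsigned long *p, unsigned long val)
{
    unsigned long flags;

    local_irq_save(flags);     /* __save_and_cli: mask upcalls, remember old state */
    *p = val;                  /* short critical section                           */
    local_irq_restore(flags);  /* __restore_flags: unmask only if it was unmasked  */
}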
+
+ /*
+ * disable hlt during certain critical i/o operations
+ */
+ #define HAVE_DISABLE_HLT
+ void disable_hlt(void);
+ void enable_hlt(void);
+
+ extern int es7000_plat;
+ void cpu_idle_wait(void);
+
+ #endif
--- /dev/null
-/* Force a proper event-channel callback from Xen. */
-void force_evtchn_callback(void);
-
+ /******************************************************************************
+ * evtchn.h
+ *
+ * Communication via Xen event channels.
+ * Also definitions for the device that demuxes notifications to userspace.
+ *
+ * Copyright (c) 2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #ifndef __ASM_EVTCHN_H__
+ #define __ASM_EVTCHN_H__
+
+ #include <linux/config.h>
+ #include <asm-xen/hypervisor.h>
+ #include <asm/ptrace.h>
+ #include <asm/synch_bitops.h>
+ #include <asm-xen/xen-public/event_channel.h>
++#include <linux/smp.h>
+
+ /*
+ * LOW-LEVEL DEFINITIONS
+ */
+
- !synch_test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
+ /* Entry point for notifications into Linux subsystems. */
+ asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
+
+ /* Entry point for notifications into the userland character device. */
+ void evtchn_device_upcall(int port);
+
+ static inline void mask_evtchn(int port)
+ {
+ shared_info_t *s = HYPERVISOR_shared_info;
+ synch_set_bit(port, &s->evtchn_mask[0]);
+ }
+
+ static inline void unmask_evtchn(int port)
+ {
+ shared_info_t *s = HYPERVISOR_shared_info;
++ vcpu_info_t *vcpu_info = &s->vcpu_data[smp_processor_id()];
+
+ synch_clear_bit(port, &s->evtchn_mask[0]);
+
+ /*
+ * The following is basically the equivalent of 'hw_resend_irq'. Just like
+ * a real IO-APIC we 'lose the interrupt edge' if the channel is masked.
+ */
+ if ( synch_test_bit (port, &s->evtchn_pending[0]) &&
- s->vcpu_data[0].evtchn_upcall_pending = 1;
- if ( !s->vcpu_data[0].evtchn_upcall_mask )
++ !synch_test_and_set_bit(port>>5, &vcpu_info->evtchn_pending_sel) )
+ {
++ vcpu_info->evtchn_upcall_pending = 1;
++ if ( !vcpu_info->evtchn_upcall_mask )
+ force_evtchn_callback();
+ }
+ }
+
+ static inline void clear_evtchn(int port)
+ {
+ shared_info_t *s = HYPERVISOR_shared_info;
+ synch_clear_bit(port, &s->evtchn_pending[0]);
+ }
+
+ static inline void notify_via_evtchn(int port)
+ {
+ evtchn_op_t op;
+ op.cmd = EVTCHNOP_send;
+ op.u.send.local_port = port;
+ (void)HYPERVISOR_event_channel_op(&op);
+ }
+
+ /*
+ * CHARACTER-DEVICE DEFINITIONS
+ */
+
+ /* /dev/xen/evtchn resides at device number major=10, minor=201 */
+ #define EVTCHN_MINOR 201
+
+ /* /dev/xen/evtchn ioctls: */
+ /* EVTCHN_RESET: Clear and reinit the event buffer. Clear error condition. */
+ #define EVTCHN_RESET _IO('E', 1)
+ /* EVTCHN_BIND: Bind to the specified event-channel port. */
+ #define EVTCHN_BIND _IO('E', 2)
+ /* EVTCHN_UNBIND: Unbind from the specified event-channel port. */
+ #define EVTCHN_UNBIND _IO('E', 3)
+
+ #endif /* __ASM_EVTCHN_H__ */
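A typical per-port receive path masks the channel, acknowledges the pending bit, does the work, and unmasks; unmask_evtchn() above then re-raises the upcall if another edge arrived in the meantime. A hedged sketch using only the helpers declared in this header ('handle_port' is illustrative):

static void demux_one_port(int port)
{
    mask_evtchn(port);        /* suppress further edges while we work        */
    clear_evtchn(port);       /* acknowledge the pending bit                 */
    handle_port(port);        /* driver-specific handling (illustrative)     */
    unmask_evtchn(port);      /* may force a callback if the port re-pended  */
}

/* Notifying the remote end of a bound channel is a single call: */
static void kick_remote(int port)
{
    notify_via_evtchn(port);  /* issues an EVTCHNOP_send operation           */
}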
--- /dev/null
+ /******************************************************************************
+ * gnttab.h
+ *
+ * Two sets of functionality:
+ * 1. Granting foreign access to our memory reservation.
+ * 2. Accessing others' memory reservations via grant references.
+ * (i.e., mechanisms for both sender and recipient of grant references)
+ *
+ * Copyright (c) 2004, K A Fraser
+ */
+
+ #ifndef __ASM_GNTTAB_H__
+ #define __ASM_GNTTAB_H__
+
+ #include <linux/config.h>
+ #include <asm-xen/hypervisor.h>
+ #include <asm-xen/xen-public/grant_table.h>
+
+ int
+ gnttab_grant_foreign_access(
+ domid_t domid, unsigned long frame, int readonly);
+
+ void
+ gnttab_end_foreign_access(
+ grant_ref_t ref, int readonly);
+
+ int
+ gnttab_grant_foreign_transfer(
+ domid_t domid);
+
+ unsigned long
+ gnttab_end_foreign_transfer(
+ grant_ref_t ref);
+
++int
++gnttab_query_foreign_access(
++ grant_ref_t ref );
++
+ #endif /* __ASM_GNTTAB_H__ */
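On the granting side the lifecycle is: grant access to a frame, hand the resulting reference to the peer, and revoke it when the peer is finished. A hedged sketch; 'peer_domid' and 'frame' are hypothetical values obtained elsewhere, and the error check is an assumption about the return convention:

static int share_frame_with_peer(domid_t peer_domid, unsigned long frame)
{
    int ref;

    ref = gnttab_grant_foreign_access(peer_domid, frame, 0 /* read-write */);
    if (ref < 0)                 /* assumed error convention */
        return ref;

    /* ...pass 'ref' to the peer (e.g. in a shared ring) and let it map the page... */

    gnttab_end_foreign_access(ref, 0);   /* revoke once the peer is done */
    return 0;
}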
--- /dev/null
-extern unsigned int mmu_update_queue_idx;
-
+ /******************************************************************************
+ * hypervisor.h
+ *
+ * Linux-specific hypervisor handling.
+ *
+ * Copyright (c) 2002-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #ifndef __HYPERVISOR_H__
+ #define __HYPERVISOR_H__
+
+ #include <linux/config.h>
+ #include <linux/types.h>
+ #include <linux/kernel.h>
+ #include <linux/version.h>
+ #include <asm-xen/xen-public/xen.h>
+ #include <asm-xen/xen-public/dom0_ops.h>
+ #include <asm-xen/xen-public/io/domain_controller.h>
+ #include <asm/ptrace.h>
+ #include <asm/page.h>
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ #include <asm-generic/pgtable-nopmd.h>
+ #endif
+
+ /* arch/xen/i386/kernel/setup.c */
+ union xen_start_info_union
+ {
+ start_info_t xen_start_info;
+ char padding[512];
+ };
+ extern union xen_start_info_union xen_start_info_union;
+ #define xen_start_info (xen_start_info_union.xen_start_info)
+
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
++
+ /* arch/xen/kernel/process.c */
+ void xen_cpu_idle (void);
+
+ /* arch/xen/i386/kernel/hypervisor.c */
+ void do_hypervisor_callback(struct pt_regs *regs);
+
+ /* arch/xen/i386/kernel/head.S */
+ void lgdt_finish(void);
+
+ /* arch/xen/i386/mm/hypervisor.c */
+ /*
+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
+ * be MACHINE addresses.
+ */
+
-static inline int flush_page_update_queue(void)
-{
- unsigned int idx = mmu_update_queue_idx;
- if ( idx != 0 ) _flush_page_update_queue();
- return idx;
-}
+ void queue_l1_entry_update(pte_t *ptr, unsigned long val);
+ void queue_l2_entry_update(pmd_t *ptr, pmd_t val);
+ void queue_pt_switch(unsigned long ptr);
+ void queue_tlb_flush(void);
+ void queue_invlpg(unsigned long ptr);
+ void queue_pgd_pin(unsigned long ptr);
+ void queue_pgd_unpin(unsigned long ptr);
+ void queue_pte_pin(unsigned long ptr);
+ void queue_pte_unpin(unsigned long ptr);
+ void queue_set_ldt(unsigned long ptr, unsigned long bytes);
+ void queue_machphys_update(unsigned long mfn, unsigned long pfn);
+ void xen_l1_entry_update(pte_t *ptr, unsigned long val);
+ void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
+ void xen_pt_switch(unsigned long ptr);
+ void xen_tlb_flush(void);
+ void xen_invlpg(unsigned long ptr);
+ void xen_pgd_pin(unsigned long ptr);
+ void xen_pgd_unpin(unsigned long ptr);
+ void xen_pte_pin(unsigned long ptr);
+ void xen_pte_unpin(unsigned long ptr);
+ void xen_set_ldt(unsigned long ptr, unsigned long bytes);
+ void xen_machphys_update(unsigned long mfn, unsigned long pfn);
+
+ void _flush_page_update_queue(void);
- void)
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
++/*
++** XXX SMH: 2.4 doesn't have percpu.h (or support SMP guests) so just
++** include sufficient #defines to allow the below to build.
++*/
++#define DEFINE_PER_CPU(type, name) \
++ __typeof__(type) per_cpu__##name
++
++#define per_cpu(var, cpu) (*((void)cpu, &per_cpu__##var))
++#define __get_cpu_var(var) per_cpu__##var
++#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
++
++#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
++#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
++#endif /* linux < 2.6.0 */
++
++#define flush_page_update_queue() do { \
++ DECLARE_PER_CPU(unsigned int, mmu_update_queue_idx); \
++ if (per_cpu(mmu_update_queue_idx, smp_processor_id())) \
++ _flush_page_update_queue(); \
++} while (0)
+ #define xen_flush_page_update_queue() (_flush_page_update_queue())
+ #define XEN_flush_page_update_queue() (_flush_page_update_queue())
+ void MULTICALL_flush_page_update_queue(void);
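The queue_* helpers batch page-table writes; nothing reaches Xen until the per-CPU queue is flushed, at which point the queued requests go down together in a batched mmu_update hypercall. A hedged sketch of the pattern (the pte pointers and values are illustrative):

static void update_two_ptes(pte_t *ptep1, pte_t *ptep2,
                            unsigned long val1, unsigned long val2)
{
    queue_l1_entry_update(ptep1, val1);   /* queued, not yet visible to Xen    */
    queue_l1_entry_update(ptep2, val2);
    flush_page_update_queue();            /* push the batch to the hypervisor  */
}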
+
+ #ifdef CONFIG_XEN_PHYSDEV_ACCESS
+ /* Allocate a contiguous empty region of low memory. Return virtual start. */
+ unsigned long allocate_empty_lowmem_region(unsigned long pages);
+ #endif
+
+ /*
+ * Assembler stubs for hyper-calls.
+ */
+
+ static inline int
+ HYPERVISOR_set_trap_table(
+ trap_info_t *table)
+ {
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_set_trap_table), "1" (table)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_mmu_update(
+ mmu_update_t *req, int count, int *success_count)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_mmu_update), "1" (req), "2" (count),
+ "3" (success_count)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_set_gdt(
+ unsigned long *frame_list, int entries)
+ {
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_gdt), "1" (frame_list), "2" (entries)
+ : "memory" );
+
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_stack_switch(
+ unsigned long ss, unsigned long esp)
+ {
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_stack_switch), "1" (ss), "2" (esp)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_set_callbacks(
+ unsigned long event_selector, unsigned long event_address,
+ unsigned long failsafe_selector, unsigned long failsafe_address)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_set_callbacks), "1" (event_selector),
+ "2" (event_address), "3" (failsafe_selector), "4" (failsafe_address)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_fpu_taskswitch(
- : "=a" (ret) : "0" (__HYPERVISOR_fpu_taskswitch) : "memory" );
++ int set)
+ {
+ int ret;
++ unsigned long ign;
++
+ __asm__ __volatile__ (
+ TRAP_INSTR
- unsigned long page_nr, pte_t new_val, unsigned long flags)
++ : "=a" (ret), "=b" (ign)
++ : "0" (__HYPERVISOR_fpu_taskswitch), "1" (set)
++ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_yield(
+ void)
+ {
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_yield)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_block(
+ void)
+ {
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op), "1" (SCHEDOP_block)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_shutdown(
+ void)
+ {
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_poweroff << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_reboot(
+ void)
+ {
+ int ret;
+ unsigned long ign1;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_sched_op),
+ "1" (SCHEDOP_shutdown | (SHUTDOWN_reboot << SCHEDOP_reasonshift))
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_suspend(
+ unsigned long srec)
+ {
+ int ret;
+ unsigned long ign1, ign2;
+
+ /* NB. On suspend, control software expects a suspend record in %esi. */
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=S" (ign2)
+ : "0" (__HYPERVISOR_sched_op),
+ "b" (SCHEDOP_shutdown | (SHUTDOWN_suspend << SCHEDOP_reasonshift)),
+ "S" (srec) : "memory");
+
+ return ret;
+ }
+
+ static inline long
+ HYPERVISOR_set_timer_op(
+ u64 timeout)
+ {
+ int ret;
+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
+ unsigned long timeout_lo = (unsigned long)timeout;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_timer_op), "b" (timeout_hi), "c" (timeout_lo)
+ : "memory");
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_dom0_op(
+ dom0_op_t *dom0_op)
+ {
+ int ret;
+ unsigned long ign1;
+
+ dom0_op->interface_version = DOM0_INTERFACE_VERSION;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1)
+ : "0" (__HYPERVISOR_dom0_op), "1" (dom0_op)
+ : "memory");
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_set_debugreg(
+ int reg, unsigned long value)
+ {
+ int ret;
+ unsigned long ign1, ign2;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_set_debugreg), "1" (reg), "2" (value)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline unsigned long
+ HYPERVISOR_get_debugreg(
+ int reg)
+ {
+ unsigned long ret;
+ unsigned long ign;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_get_debugreg), "1" (reg)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_update_descriptor(
+ unsigned long ma, unsigned long word1, unsigned long word2)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_descriptor), "1" (ma), "2" (word1),
+ "3" (word2)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_set_fast_trap(
+ int idx)
+ {
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_set_fast_trap), "1" (idx)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_dom_mem_op(
+ unsigned int op, unsigned long *extent_list,
+ unsigned long nr_extents, unsigned int extent_order)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4, ign5;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4),
+ "=D" (ign5)
+ : "0" (__HYPERVISOR_dom_mem_op), "1" (op), "2" (extent_list),
+ "3" (nr_extents), "4" (extent_order), "5" (DOMID_SELF)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_multicall(
+ void *call_list, int nr_calls)
+ {
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_multicall), "1" (call_list), "2" (nr_calls)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_update_va_mapping(
- "1" (page_nr), "2" ((new_val).pte_low), "3" (flags)
++ unsigned long va, pte_t new_val, unsigned long flags)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_update_va_mapping),
- page_nr, (new_val).pte_low, flags);
++ "1" (va), "2" ((new_val).pte_low), "3" (flags)
+ : "memory" );
+
+ if ( unlikely(ret < 0) )
+ {
+ printk(KERN_ALERT "Failed update VA mapping: %08lx, %08lx, %08lx\n",
- : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (count), "3" (uop)
++ va, (new_val).pte_low, flags);
+ BUG();
+ }
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_event_channel_op(
+ void *op)
+ {
+ int ret;
+ unsigned long ignore;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_event_channel_op), "1" (op)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_xen_version(
+ int cmd)
+ {
+ int ret;
+ unsigned long ignore;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ignore)
+ : "0" (__HYPERVISOR_xen_version), "1" (cmd)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_console_io(
+ int cmd, int count, char *str)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3;
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
+ : "0" (__HYPERVISOR_console_io), "1" (cmd), "2" (count), "3" (str)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_physdev_op(
+ void *physdev_op)
+ {
+ int ret;
+ unsigned long ign;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign)
+ : "0" (__HYPERVISOR_physdev_op), "1" (physdev_op)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_grant_table_op(
+ unsigned int cmd, void *uop, unsigned int count)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3)
- unsigned long page_nr, pte_t new_val, unsigned long flags, domid_t domid)
++ : "0" (__HYPERVISOR_grant_table_op), "1" (cmd), "2" (uop), "3" (count)
+ : "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_update_va_mapping_otherdomain(
- "1" (page_nr), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
++ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
+ {
+ int ret;
+ unsigned long ign1, ign2, ign3, ign4;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2), "=d" (ign3), "=S" (ign4)
+ : "0" (__HYPERVISOR_update_va_mapping_otherdomain),
++ "1" (va), "2" ((new_val).pte_low), "3" (flags), "4" (domid) :
+ "memory" );
+
+ return ret;
+ }
+
+ static inline int
+ HYPERVISOR_vm_assist(
+ unsigned int cmd, unsigned int type)
+ {
+ int ret;
+ unsigned long ign1, ign2;
+
+ __asm__ __volatile__ (
+ TRAP_INSTR
+ : "=a" (ret), "=b" (ign1), "=c" (ign2)
+ : "0" (__HYPERVISOR_vm_assist), "1" (cmd), "2" (type)
+ : "memory" );
+
+ return ret;
+ }
+
++static inline int
++HYPERVISOR_boot_vcpu(
++ unsigned long vcpu, full_execution_context_t *ctxt)
++{
++ int ret;
++ unsigned long ign1, ign2;
++
++ __asm__ __volatile__ (
++ TRAP_INSTR
++ : "=a" (ret), "=b" (ign1), "=c" (ign2)
++ : "0" (__HYPERVISOR_boot_vcpu), "1" (vcpu), "2" (ctxt)
++ : "memory");
++
++ return ret;
++}
++
+ #endif /* __HYPERVISOR_H__ */
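
Every wrapper in the hypervisor.h hunk above follows the same calling convention: the hypercall index is loaded into %eax through the "0" matching constraint, up to five arguments fill %ebx, %ecx, %edx, %esi and %edi in order, TRAP_INSTR traps into Xen, and the result comes back in %eax; the dummy "ign" outputs exist only to tell GCC that the argument registers are clobbered. As a minimal sketch of the pattern, not part of this changeset, a hypothetical two-argument hypercall would be wrapped as below; __HYPERVISOR_example_op is an invented number used purely for illustration.

    /* Illustrative sketch only -- not part of this changeset. */
    #include <asm-xen/hypervisor.h>

    #define __HYPERVISOR_example_op 63   /* hypothetical hypercall index */

    static inline int
    HYPERVISOR_example_op(
        unsigned long arg1, unsigned long arg2)
    {
        int ret;
        unsigned long ign1, ign2;       /* %ebx/%ecx are clobbered by the call */

        __asm__ __volatile__ (
            TRAP_INSTR                  /* software interrupt into the hypervisor */
            : "=a" (ret), "=b" (ign1), "=c" (ign2)
            : "0" (__HYPERVISOR_example_op), "1" (arg1), "2" (arg2)
            : "memory" );

        return ret;
    }
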
--- /dev/null
-extern multicall_entry_t multicall_list[];
-extern int nr_multicall_ents;
+ /******************************************************************************
+ * multicall.h
+ *
+ * Copyright (c) 2003-2004, K A Fraser
+ *
+ * This file may be distributed separately from the Linux kernel, or
+ * incorporated into other software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+ #ifndef __MULTICALL_H__
+ #define __MULTICALL_H__
+
+ #include <asm-xen/hypervisor.h>
+
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- nr_multicall_ents = i+1;
++DECLARE_PER_CPU(multicall_entry_t, multicall_list[]);
++DECLARE_PER_CPU(int, nr_multicall_ents);
+
+ static inline void queue_multicall0(unsigned long op)
+ {
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- nr_multicall_ents = i+1;
++ int cpu = smp_processor_id();
++ int i = per_cpu(nr_multicall_ents, cpu);
++ per_cpu(multicall_list[i], cpu).op = op;
++ per_cpu(nr_multicall_ents, cpu) = i+1;
+ }
+
+ static inline void queue_multicall1(unsigned long op, unsigned long arg1)
+ {
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- nr_multicall_ents = i+1;
++ int cpu = smp_processor_id();
++ int i = per_cpu(nr_multicall_ents, cpu);
++ per_cpu(multicall_list[i], cpu).op = op;
++ per_cpu(multicall_list[i], cpu).args[0] = arg1;
++ per_cpu(nr_multicall_ents, cpu) = i+1;
+ }
+
+ static inline void queue_multicall2(
+ unsigned long op, unsigned long arg1, unsigned long arg2)
+ {
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- nr_multicall_ents = i+1;
++ int cpu = smp_processor_id();
++ int i = per_cpu(nr_multicall_ents, cpu);
++ per_cpu(multicall_list[i], cpu).op = op;
++ per_cpu(multicall_list[i], cpu).args[0] = arg1;
++ per_cpu(multicall_list[i], cpu).args[1] = arg2;
++ per_cpu(nr_multicall_ents, cpu) = i+1;
+ }
+
+ static inline void queue_multicall3(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3)
+ {
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- nr_multicall_ents = i+1;
++ int cpu = smp_processor_id();
++ int i = per_cpu(nr_multicall_ents, cpu);
++ per_cpu(multicall_list[i], cpu).op = op;
++ per_cpu(multicall_list[i], cpu).args[0] = arg1;
++ per_cpu(multicall_list[i], cpu).args[1] = arg2;
++ per_cpu(multicall_list[i], cpu).args[2] = arg3;
++ per_cpu(nr_multicall_ents, cpu) = i+1;
+ }
+
+ static inline void queue_multicall4(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4)
+ {
- int i = nr_multicall_ents;
- multicall_list[i].op = op;
- multicall_list[i].args[0] = arg1;
- multicall_list[i].args[1] = arg2;
- multicall_list[i].args[2] = arg3;
- multicall_list[i].args[3] = arg4;
- multicall_list[i].args[4] = arg5;
- nr_multicall_ents = i+1;
++ int cpu = smp_processor_id();
++ int i = per_cpu(nr_multicall_ents, cpu);
++ per_cpu(multicall_list[i], cpu).op = op;
++ per_cpu(multicall_list[i], cpu).args[0] = arg1;
++ per_cpu(multicall_list[i], cpu).args[1] = arg2;
++ per_cpu(multicall_list[i], cpu).args[2] = arg3;
++ per_cpu(multicall_list[i], cpu).args[3] = arg4;
++ per_cpu(nr_multicall_ents, cpu) = i+1;
+ }
+
+ static inline void queue_multicall5(
+ unsigned long op, unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4, unsigned long arg5)
+ {
- if ( unlikely(nr_multicall_ents == 0) ) return;
- (void)HYPERVISOR_multicall(multicall_list, nr_multicall_ents);
- nr_multicall_ents = 0;
++ int cpu = smp_processor_id();
++ int i = per_cpu(nr_multicall_ents, cpu);
++ per_cpu(multicall_list[i], cpu).op = op;
++ per_cpu(multicall_list[i], cpu).args[0] = arg1;
++ per_cpu(multicall_list[i], cpu).args[1] = arg2;
++ per_cpu(multicall_list[i], cpu).args[2] = arg3;
++ per_cpu(multicall_list[i], cpu).args[3] = arg4;
++ per_cpu(multicall_list[i], cpu).args[4] = arg5;
++ per_cpu(nr_multicall_ents, cpu) = i+1;
+ }
+
+ static inline void execute_multicall_list(void)
+ {
++ int cpu = smp_processor_id();
++ if ( unlikely(per_cpu(nr_multicall_ents, cpu) == 0) ) return;
++ (void)HYPERVISOR_multicall(&per_cpu(multicall_list[0], cpu),
++ per_cpu(nr_multicall_ents, cpu));
++ per_cpu(nr_multicall_ents, cpu) = 0;
+ }
+
+ #endif /* __MULTICALL_H__ */
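
The multicall queue above batches hypercalls on a per-CPU list and flushes the whole list with a single HYPERVISOR_multicall() trap. A usage sketch follows, illustrative only and reusing the hypothetical __HYPERVISOR_example_op from the earlier sketch; because the list and its index are per-CPU, the caller is assumed to stay on one CPU and not be re-entered between queueing and the flush.

    /* Illustrative sketch only -- not part of this changeset. */
    #include <asm-xen/multicall.h>

    static void example_batched_hypercalls(unsigned long a, unsigned long b)
    {
        /* Append two entries to this CPU's multicall_list. */
        queue_multicall0(__HYPERVISOR_example_op);
        queue_multicall2(__HYPERVISOR_example_op, a, b);

        /* Issue the whole batch with one trap and reset the per-CPU count. */
        execute_multicall_list();
    }
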