- include include/asm-generic/Kbuild.asm
-
- header-y += auxvec.h
- header-y += bootx.h
- header-y += byteorder.h
- header-y += cputable.h
- header-y += elf.h
- header-y += errno.h
- header-y += fcntl.h
- header-y += ioctl.h
- header-y += ioctls.h
- header-y += ipcbuf.h
- header-y += linkage.h
- header-y += msgbuf.h
- header-y += nvram.h
- header-y += param.h
- header-y += poll.h
- header-y += posix_types.h
- header-y += ps3fb.h
- header-y += resource.h
- header-y += seccomp.h
- header-y += sembuf.h
- header-y += shmbuf.h
- header-y += sigcontext.h
- header-y += siginfo.h
- header-y += signal.h
- header-y += socket.h
- header-y += sockios.h
- header-y += spu_info.h
- header-y += stat.h
- header-y += statfs.h
- header-y += termbits.h
- header-y += termios.h
- header-y += types.h
- header-y += ucontext.h
- header-y += unistd.h
- header-y += epapr_hcalls.h
-
+ generic-y += clkdev.h
generic-y += rwsem.h
+++ /dev/null
--/*
-- * ePAPR hcall interface
-- *
-- * Copyright 2008-2011 Freescale Semiconductor, Inc.
-- *
-- * Author: Timur Tabi <timur@freescale.com>
-- *
-- * This file is provided under a dual BSD/GPL license. When using or
-- * redistributing this file, you may do so under either license.
-- *
-- * Redistribution and use in source and binary forms, with or without
-- * modification, are permitted provided that the following conditions are met:
-- * * Redistributions of source code must retain the above copyright
-- * notice, this list of conditions and the following disclaimer.
-- * * Redistributions in binary form must reproduce the above copyright
-- * notice, this list of conditions and the following disclaimer in the
-- * documentation and/or other materials provided with the distribution.
-- * * Neither the name of Freescale Semiconductor nor the
-- * names of its contributors may be used to endorse or promote products
-- * derived from this software without specific prior written permission.
-- *
-- *
-- * ALTERNATIVELY, this software may be distributed under the terms of the
-- * GNU General Public License ("GPL") as published by the Free Software
-- * Foundation, either version 2 of that License or (at your option) any
-- * later version.
-- *
-- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
-- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
-- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-- */
--
--/* A "hypercall" is an "sc 1" instruction. This header file file provides C
-- * wrapper functions for the ePAPR hypervisor interface. It is inteded
-- * for use by Linux device drivers and other operating systems.
-- *
-- * The hypercalls are implemented as inline assembly, rather than assembly
-- * language functions in a .S file, for optimization. It allows
-- * the caller to issue the hypercall instruction directly, improving both
-- * performance and memory footprint.
-- */
--
--#ifndef _EPAPR_HCALLS_H
--#define _EPAPR_HCALLS_H
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <asm/byteorder.h>
--
--#define EV_BYTE_CHANNEL_SEND 1
--#define EV_BYTE_CHANNEL_RECEIVE 2
--#define EV_BYTE_CHANNEL_POLL 3
--#define EV_INT_SET_CONFIG 4
--#define EV_INT_GET_CONFIG 5
--#define EV_INT_SET_MASK 6
--#define EV_INT_GET_MASK 7
--#define EV_INT_IACK 9
--#define EV_INT_EOI 10
--#define EV_INT_SEND_IPI 11
--#define EV_INT_SET_TASK_PRIORITY 12
--#define EV_INT_GET_TASK_PRIORITY 13
--#define EV_DOORBELL_SEND 14
--#define EV_MSGSND 15
--#define EV_IDLE 16
--
--/* vendor ID: epapr */
--#define EV_LOCAL_VENDOR_ID 0 /* for private use */
--#define EV_EPAPR_VENDOR_ID 1
--#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
--#define EV_IBM_VENDOR_ID 3 /* IBM */
--#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
--#define EV_ENEA_VENDOR_ID 5 /* Enea */
--#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
--#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
--#define EV_KVM_VENDOR_ID 42 /* KVM */
--
--/* The max number of bytes that a byte channel can send or receive per call */
--#define EV_BYTE_CHANNEL_MAX_BYTES 16
--
--
--#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
--#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
--
- /* epapr return codes */
- #define EV_SUCCESS 0
-/* epapr error codes */
--#define EV_EPERM 1 /* Operation not permitted */
--#define EV_ENOENT 2 /* Entry Not Found */
--#define EV_EIO 3 /* I/O error occured */
--#define EV_EAGAIN 4 /* The operation had insufficient
-- * resources to complete and should be
-- * retried
-- */
--#define EV_ENOMEM 5 /* There was insufficient memory to
-- * complete the operation */
--#define EV_EFAULT 6 /* Bad guest address */
--#define EV_ENODEV 7 /* No such device */
--#define EV_EINVAL 8 /* An argument supplied to the hcall
-- was out of range or invalid */
--#define EV_INTERNAL 9 /* An internal error occured */
--#define EV_CONFIG 10 /* A configuration error was detected */
--#define EV_INVALID_STATE 11 /* The object is in an invalid state */
--#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
--#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
-
- #ifndef __ASSEMBLY__
- #include <linux/types.h>
- #include <linux/errno.h>
- #include <asm/byteorder.h>
--
--/*
-- * Hypercall register clobber list
-- *
-- * These macros are used to define the list of clobbered registers during a
-- * hypercall. Technically, registers r0 and r3-r12 are always clobbered,
-- * but the gcc inline assembly syntax does not allow us to specify registers
-- * on the clobber list that are also on the input/output list. Therefore,
-- * the lists of clobbered registers depends on the number of register
-- * parmeters ("+r" and "=r") passed to the hypercall.
-- *
-- * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
-- * general rule, 'x' is the number of parameters passed to the assembly
-- * block *except* for r11.
-- *
-- * If you're not sure, just use the smallest value of 'x' that does not
-- * generate a compilation error. Because these are static inline functions,
-- * the compiler will only check the clobber list for a function if you
-- * compile code that calls that function.
-- *
-- * r3 and r11 are not included in any clobbers list because they are always
-- * listed as output registers.
-- *
-- * XER, CTR, and LR are currently listed as clobbers because it's uncertain
-- * whether they will be clobbered.
-- *
-- * Note that r11 can be used as an output parameter.
-- *
-- * The "memory" clobber is only necessary for hcalls where the Hypervisor
-- * will read or write guest memory. However, we add it to all hcalls because
-- * the impact is minimal, and we want to ensure that it's present for the
-- * hcalls that need it.
--*/
--
--/* List of common clobbered registers. Do not use this macro. */
--#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
--
--#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
--#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
--#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
--#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
--#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
--#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
--#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
--#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
--
--extern bool epapr_paravirt_enabled;
--extern u32 epapr_hypercall_start[];
--
--/*
-- * We use "uintptr_t" to define a register because it's guaranteed to be a
-- * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
-- * platform.
-- *
-- * All registers are either input/output or output only. Registers that are
-- * initialized before making the hypercall are input/output. All
-- * input/output registers are represented with "+r". Output-only registers
-- * are represented with "=r". Do not specify any unused registers. The
-- * clobber list will tell the compiler that the hypercall modifies those
-- * registers, which is good enough.
-- */
--
--/**
-- * ev_int_set_config - configure the specified interrupt
-- * @interrupt: the interrupt number
-- * @config: configuration for this interrupt
-- * @priority: interrupt priority
-- * @destination: destination CPU number
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_int_set_config(unsigned int interrupt,
-- uint32_t config, unsigned int priority, uint32_t destination)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
-- register uintptr_t r5 __asm__("r5");
-- register uintptr_t r6 __asm__("r6");
--
-- r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
-- r3 = interrupt;
-- r4 = config;
-- r5 = priority;
-- r6 = destination;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
-- : : EV_HCALL_CLOBBERS4
-- );
--
-- return r3;
--}
--
--/**
-- * ev_int_get_config - return the config of the specified interrupt
-- * @interrupt: the interrupt number
-- * @config: returned configuration for this interrupt
-- * @priority: returned interrupt priority
-- * @destination: returned destination CPU number
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_int_get_config(unsigned int interrupt,
-- uint32_t *config, unsigned int *priority, uint32_t *destination)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
-- register uintptr_t r5 __asm__("r5");
-- register uintptr_t r6 __asm__("r6");
--
-- r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
-- r3 = interrupt;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
-- : : EV_HCALL_CLOBBERS4
-- );
--
-- *config = r4;
-- *priority = r5;
-- *destination = r6;
--
-- return r3;
--}
--
--/**
-- * ev_int_set_mask - sets the mask for the specified interrupt source
-- * @interrupt: the interrupt number
-- * @mask: 0=enable interrupts, 1=disable interrupts
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_int_set_mask(unsigned int interrupt,
-- unsigned int mask)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
--
-- r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
-- r3 = interrupt;
-- r4 = mask;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3), "+r" (r4)
-- : : EV_HCALL_CLOBBERS2
-- );
--
-- return r3;
--}
--
--/**
-- * ev_int_get_mask - returns the mask for the specified interrupt source
-- * @interrupt: the interrupt number
-- * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_int_get_mask(unsigned int interrupt,
-- unsigned int *mask)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
--
-- r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
-- r3 = interrupt;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3), "=r" (r4)
-- : : EV_HCALL_CLOBBERS2
-- );
--
-- *mask = r4;
--
-- return r3;
--}
--
--/**
-- * ev_int_eoi - signal the end of interrupt processing
-- * @interrupt: the interrupt number
-- *
-- * This function signals the end of processing for the the specified
-- * interrupt, which must be the interrupt currently in service. By
-- * definition, this is also the highest-priority interrupt.
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_int_eoi(unsigned int interrupt)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
--
-- r11 = EV_HCALL_TOKEN(EV_INT_EOI);
-- r3 = interrupt;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3)
-- : : EV_HCALL_CLOBBERS1
-- );
--
-- return r3;
--}
--
--/**
-- * ev_byte_channel_send - send characters to a byte stream
-- * @handle: byte stream handle
-- * @count: (input) num of chars to send, (output) num chars sent
-- * @buffer: pointer to a 16-byte buffer
-- *
-- * @buffer must be at least 16 bytes long, because all 16 bytes will be
-- * read from memory into registers, even if count < 16.
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_byte_channel_send(unsigned int handle,
-- unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
-- register uintptr_t r5 __asm__("r5");
-- register uintptr_t r6 __asm__("r6");
-- register uintptr_t r7 __asm__("r7");
-- register uintptr_t r8 __asm__("r8");
-- const uint32_t *p = (const uint32_t *) buffer;
--
-- r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
-- r3 = handle;
-- r4 = *count;
-- r5 = be32_to_cpu(p[0]);
-- r6 = be32_to_cpu(p[1]);
-- r7 = be32_to_cpu(p[2]);
-- r8 = be32_to_cpu(p[3]);
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3),
-- "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
-- : : EV_HCALL_CLOBBERS6
-- );
--
-- *count = r4;
--
-- return r3;
--}
--
--/**
-- * ev_byte_channel_receive - fetch characters from a byte channel
-- * @handle: byte channel handle
-- * @count: (input) max num of chars to receive, (output) num chars received
-- * @buffer: pointer to a 16-byte buffer
-- *
-- * The size of @buffer must be at least 16 bytes, even if you request fewer
-- * than 16 characters, because we always write 16 bytes to @buffer. This is
-- * for performance reasons.
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_byte_channel_receive(unsigned int handle,
-- unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
-- register uintptr_t r5 __asm__("r5");
-- register uintptr_t r6 __asm__("r6");
-- register uintptr_t r7 __asm__("r7");
-- register uintptr_t r8 __asm__("r8");
-- uint32_t *p = (uint32_t *) buffer;
--
-- r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
-- r3 = handle;
-- r4 = *count;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3), "+r" (r4),
-- "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
-- : : EV_HCALL_CLOBBERS6
-- );
--
-- *count = r4;
-- p[0] = cpu_to_be32(r5);
-- p[1] = cpu_to_be32(r6);
-- p[2] = cpu_to_be32(r7);
-- p[3] = cpu_to_be32(r8);
--
-- return r3;
--}
--
--/**
-- * ev_byte_channel_poll - returns the status of the byte channel buffers
-- * @handle: byte channel handle
-- * @rx_count: returned count of bytes in receive queue
-- * @tx_count: returned count of free space in transmit queue
-- *
-- * This function reports the amount of data in the receive queue (i.e. the
-- * number of bytes you can read), and the amount of free space in the transmit
-- * queue (i.e. the number of bytes you can write).
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_byte_channel_poll(unsigned int handle,
-- unsigned int *rx_count, unsigned int *tx_count)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
-- register uintptr_t r5 __asm__("r5");
--
-- r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
-- r3 = handle;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
-- : : EV_HCALL_CLOBBERS3
-- );
--
-- *rx_count = r4;
-- *tx_count = r5;
--
-- return r3;
--}
--
--/**
-- * ev_int_iack - acknowledge an interrupt
-- * @handle: handle to the target interrupt controller
-- * @vector: returned interrupt vector
-- *
-- * If handle is zero, the function returns the next interrupt source
-- * number to be handled irrespective of the hierarchy or cascading
-- * of interrupt controllers. If non-zero, specifies a handle to the
-- * interrupt controller that is the target of the acknowledge.
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_int_iack(unsigned int handle,
-- unsigned int *vector)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
-- register uintptr_t r4 __asm__("r4");
--
-- r11 = EV_HCALL_TOKEN(EV_INT_IACK);
-- r3 = handle;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3), "=r" (r4)
-- : : EV_HCALL_CLOBBERS2
-- );
--
-- *vector = r4;
--
-- return r3;
--}
--
--/**
-- * ev_doorbell_send - send a doorbell to another partition
-- * @handle: doorbell send handle
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_doorbell_send(unsigned int handle)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
--
-- r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
-- r3 = handle;
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "+r" (r3)
-- : : EV_HCALL_CLOBBERS1
-- );
--
-- return r3;
--}
--
--/**
-- * ev_idle -- wait for next interrupt on this core
-- *
-- * Returns 0 for success, or an error code.
-- */
--static inline unsigned int ev_idle(void)
--{
-- register uintptr_t r11 __asm__("r11");
-- register uintptr_t r3 __asm__("r3");
--
-- r11 = EV_HCALL_TOKEN(EV_IDLE);
--
- asm volatile("bl epapr_hypercall_start"
- __asm__ __volatile__ ("sc 1"
-- : "+r" (r11), "=r" (r3)
-- : : EV_HCALL_CLOBBERS1
-- );
--
-- return r3;
--}
- #endif /* !__ASSEMBLY__ */
-
--#endif
#ifndef __POWERPC_KVM_PARA_H__
#define __POWERPC_KVM_PARA_H__
- #include <linux/types.h>
-
- /*
- * Additions to this struct must only occur at the end, and should be
- * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
- * (albeit not necessarily relevant to the current target hardware platform).
- *
- * Struct fields are always 32 or 64 bit aligned, depending on them being 32
- * or 64 bit wide respectively.
- *
- * See Documentation/virtual/kvm/ppc-pv.txt
- */
- struct kvm_vcpu_arch_shared {
- __u64 scratch1;
- __u64 scratch2;
- __u64 scratch3;
- __u64 critical; /* Guest may not get interrupts if == r1 */
- __u64 sprg0;
- __u64 sprg1;
- __u64 sprg2;
- __u64 sprg3;
- __u64 srr0;
- __u64 srr1;
- __u64 dar; /* dear on BookE */
- __u64 msr;
- __u32 dsisr;
- __u32 int_pending; /* Tells the guest if we have an interrupt */
- __u32 sr[16];
- __u32 mas0;
- __u32 mas1;
- __u64 mas7_3;
- __u64 mas2;
- __u32 mas4;
- __u32 mas6;
- __u32 esr;
- __u32 pir;
-
- /*
- * SPRG4-7 are user-readable, so we can only keep these consistent
- * between the shared area and the real registers when there's an
- * intervening exit to KVM. This also applies to SPRG3 on some
- * chips.
- *
- * This suffices for access by guest userspace, since in PR-mode
- * KVM, an exit must occur when changing the guest's MSR[PR].
- * If the guest kernel writes to SPRG3-7 via the shared area, it
- * must also use the shared area for reading while in kernel space.
- */
- __u64 sprg4;
- __u64 sprg5;
- __u64 sprg6;
- __u64 sprg7;
- };
-
- #define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
-
- #define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
-
- #include <asm/epapr_hcalls.h>
-
- #define KVM_FEATURE_MAGIC_PAGE 1
-
- #define KVM_MAGIC_FEAT_SR (1 << 0)
-
- /* MASn, ESR, PIR, and high SPRGs */
- #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1)
-
- #ifdef __KERNEL__
+ #include <uapi/asm/kvm_para.h>
-
#ifdef CONFIG_KVM_GUEST
#include <linux/of.h>
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+ header-y += auxvec.h
+ header-y += bitsperlong.h
+ header-y += bootx.h
+ header-y += byteorder.h
+ header-y += cputable.h
+ header-y += elf.h
+ header-y += errno.h
+ header-y += fcntl.h
+ header-y += ioctl.h
+ header-y += ioctls.h
+ header-y += ipcbuf.h
+ header-y += kvm.h
+ header-y += kvm_para.h
+ header-y += linkage.h
+ header-y += mman.h
+ header-y += msgbuf.h
+ header-y += nvram.h
+ header-y += param.h
+ header-y += poll.h
+ header-y += posix_types.h
+ header-y += ps3fb.h
+ header-y += ptrace.h
+ header-y += resource.h
+ header-y += seccomp.h
+ header-y += sembuf.h
+ header-y += setup.h
+ header-y += shmbuf.h
+ header-y += sigcontext.h
+ header-y += siginfo.h
+ header-y += signal.h
+ header-y += socket.h
+ header-y += sockios.h
+ header-y += spu_info.h
+ header-y += stat.h
+ header-y += statfs.h
+ header-y += swab.h
+ header-y += termbits.h
+ header-y += termios.h
+ header-y += types.h
+ header-y += ucontext.h
+ header-y += unistd.h
++header-y += epapr_hcalls.h
--- /dev/null
++/*
++ * ePAPR hcall interface
++ *
++ * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ *
++ * Author: Timur Tabi <timur@freescale.com>
++ *
++ * This file is provided under a dual BSD/GPL license. When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Freescale Semiconductor nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
++ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
++ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
++ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++/* A "hypercall" is an "sc 1" instruction. This header file provides C
++ * wrapper functions for the ePAPR hypervisor interface. It is intended
++ * for use by Linux device drivers and other operating systems.
++ *
++ * The hypercalls are implemented as inline assembly, rather than assembly
++ * language functions in a .S file, for optimization. It allows
++ * the caller to issue the hypercall instruction directly, improving both
++ * performance and memory footprint.
++ */
++
++#ifndef _EPAPR_HCALLS_H
++#define _EPAPR_HCALLS_H
++
++#define EV_BYTE_CHANNEL_SEND 1
++#define EV_BYTE_CHANNEL_RECEIVE 2
++#define EV_BYTE_CHANNEL_POLL 3
++#define EV_INT_SET_CONFIG 4
++#define EV_INT_GET_CONFIG 5
++#define EV_INT_SET_MASK 6
++#define EV_INT_GET_MASK 7
++#define EV_INT_IACK 9
++#define EV_INT_EOI 10
++#define EV_INT_SEND_IPI 11
++#define EV_INT_SET_TASK_PRIORITY 12
++#define EV_INT_GET_TASK_PRIORITY 13
++#define EV_DOORBELL_SEND 14
++#define EV_MSGSND 15
++#define EV_IDLE 16
++
++/* vendor ID: epapr */
++#define EV_LOCAL_VENDOR_ID 0 /* for private use */
++#define EV_EPAPR_VENDOR_ID 1
++#define EV_FSL_VENDOR_ID 2 /* Freescale Semiconductor */
++#define EV_IBM_VENDOR_ID 3 /* IBM */
++#define EV_GHS_VENDOR_ID 4 /* Green Hills Software */
++#define EV_ENEA_VENDOR_ID 5 /* Enea */
++#define EV_WR_VENDOR_ID 6 /* Wind River Systems */
++#define EV_AMCC_VENDOR_ID 7 /* Applied Micro Circuits */
++#define EV_KVM_VENDOR_ID 42 /* KVM */
++
++/* The max number of bytes that a byte channel can send or receive per call */
++#define EV_BYTE_CHANNEL_MAX_BYTES 16
++
++
++#define _EV_HCALL_TOKEN(id, num) (((id) << 16) | (num))
++#define EV_HCALL_TOKEN(hcall_num) _EV_HCALL_TOKEN(EV_EPAPR_VENDOR_ID, hcall_num)
++
++/* epapr return codes */
++#define EV_SUCCESS 0
++#define EV_EPERM 1 /* Operation not permitted */
++#define EV_ENOENT 2 /* Entry Not Found */
++#define EV_EIO 3 /* I/O error occurred */
++#define EV_EAGAIN 4 /* The operation had insufficient
++ * resources to complete and should be
++ * retried
++ */
++#define EV_ENOMEM 5 /* There was insufficient memory to
++ * complete the operation */
++#define EV_EFAULT 6 /* Bad guest address */
++#define EV_ENODEV 7 /* No such device */
++#define EV_EINVAL 8 /* An argument supplied to the hcall
++ was out of range or invalid */
++#define EV_INTERNAL 9 /* An internal error occurred */
++#define EV_CONFIG 10 /* A configuration error was detected */
++#define EV_INVALID_STATE 11 /* The object is in an invalid state */
++#define EV_UNIMPLEMENTED 12 /* Unimplemented hypercall */
++#define EV_BUFFER_OVERFLOW 13 /* Caller-supplied buffer too small */
++
++#ifndef __ASSEMBLY__
++#include <linux/types.h>
++#include <linux/errno.h>
++#include <asm/byteorder.h>
++
++/*
++ * Hypercall register clobber list
++ *
++ * These macros are used to define the list of clobbered registers during a
++ * hypercall. Technically, registers r0 and r3-r12 are always clobbered,
++ * but the gcc inline assembly syntax does not allow us to specify registers
++ * on the clobber list that are also on the input/output list. Therefore,
++ * the lists of clobbered registers depends on the number of register
++ * parameters ("+r" and "=r") passed to the hypercall.
++ *
++ * Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
++ * general rule, 'x' is the number of parameters passed to the assembly
++ * block *except* for r11.
++ *
++ * If you're not sure, just use the smallest value of 'x' that does not
++ * generate a compilation error. Because these are static inline functions,
++ * the compiler will only check the clobber list for a function if you
++ * compile code that calls that function.
++ *
++ * r3 and r11 are not included in any clobbers list because they are always
++ * listed as output registers.
++ *
++ * XER, CTR, and LR are currently listed as clobbers because it's uncertain
++ * whether they will be clobbered.
++ *
++ * Note that r11 can be used as an output parameter.
++ *
++ * The "memory" clobber is only necessary for hcalls where the Hypervisor
++ * will read or write guest memory. However, we add it to all hcalls because
++ * the impact is minimal, and we want to ensure that it's present for the
++ * hcalls that need it.
++*/
++
++/* List of common clobbered registers. Do not use this macro. */
++#define EV_HCALL_CLOBBERS "r0", "r12", "xer", "ctr", "lr", "cc", "memory"
++
++#define EV_HCALL_CLOBBERS8 EV_HCALL_CLOBBERS
++#define EV_HCALL_CLOBBERS7 EV_HCALL_CLOBBERS8, "r10"
++#define EV_HCALL_CLOBBERS6 EV_HCALL_CLOBBERS7, "r9"
++#define EV_HCALL_CLOBBERS5 EV_HCALL_CLOBBERS6, "r8"
++#define EV_HCALL_CLOBBERS4 EV_HCALL_CLOBBERS5, "r7"
++#define EV_HCALL_CLOBBERS3 EV_HCALL_CLOBBERS4, "r6"
++#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
++#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
++
++extern bool epapr_paravirt_enabled;
++extern u32 epapr_hypercall_start[];
++
++/*
++ * We use "uintptr_t" to define a register because it's guaranteed to be a
++ * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
++ * platform.
++ *
++ * All registers are either input/output or output only. Registers that are
++ * initialized before making the hypercall are input/output. All
++ * input/output registers are represented with "+r". Output-only registers
++ * are represented with "=r". Do not specify any unused registers. The
++ * clobber list will tell the compiler that the hypercall modifies those
++ * registers, which is good enough.
++ */
++
++/**
++ * ev_int_set_config - configure the specified interrupt
++ * @interrupt: the interrupt number
++ * @config: configuration for this interrupt
++ * @priority: interrupt priority
++ * @destination: destination CPU number
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_int_set_config(unsigned int interrupt,
++ uint32_t config, unsigned int priority, uint32_t destination)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++ register uintptr_t r5 __asm__("r5");
++ register uintptr_t r6 __asm__("r6");
++
++ r11 = EV_HCALL_TOKEN(EV_INT_SET_CONFIG);
++ r3 = interrupt;
++ r4 = config;
++ r5 = priority;
++ r6 = destination;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6)
++ : : EV_HCALL_CLOBBERS4
++ );
++
++ return r3;
++}
++
++/**
++ * ev_int_get_config - return the config of the specified interrupt
++ * @interrupt: the interrupt number
++ * @config: returned configuration for this interrupt
++ * @priority: returned interrupt priority
++ * @destination: returned destination CPU number
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_int_get_config(unsigned int interrupt,
++ uint32_t *config, unsigned int *priority, uint32_t *destination)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++ register uintptr_t r5 __asm__("r5");
++ register uintptr_t r6 __asm__("r6");
++
++ r11 = EV_HCALL_TOKEN(EV_INT_GET_CONFIG);
++ r3 = interrupt;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5), "=r" (r6)
++ : : EV_HCALL_CLOBBERS4
++ );
++
++ *config = r4;
++ *priority = r5;
++ *destination = r6;
++
++ return r3;
++}
++
++/**
++ * ev_int_set_mask - sets the mask for the specified interrupt source
++ * @interrupt: the interrupt number
++ * @mask: 0=enable interrupts, 1=disable interrupts
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_int_set_mask(unsigned int interrupt,
++ unsigned int mask)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++
++ r11 = EV_HCALL_TOKEN(EV_INT_SET_MASK);
++ r3 = interrupt;
++ r4 = mask;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3), "+r" (r4)
++ : : EV_HCALL_CLOBBERS2
++ );
++
++ return r3;
++}
++
++/**
++ * ev_int_get_mask - returns the mask for the specified interrupt source
++ * @interrupt: the interrupt number
++ * @mask: returned mask for this interrupt (0=enabled, 1=disabled)
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_int_get_mask(unsigned int interrupt,
++ unsigned int *mask)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++
++ r11 = EV_HCALL_TOKEN(EV_INT_GET_MASK);
++ r3 = interrupt;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3), "=r" (r4)
++ : : EV_HCALL_CLOBBERS2
++ );
++
++ *mask = r4;
++
++ return r3;
++}
++
++/**
++ * ev_int_eoi - signal the end of interrupt processing
++ * @interrupt: the interrupt number
++ *
++ * This function signals the end of processing for the specified
++ * interrupt, which must be the interrupt currently in service. By
++ * definition, this is also the highest-priority interrupt.
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_int_eoi(unsigned int interrupt)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++
++ r11 = EV_HCALL_TOKEN(EV_INT_EOI);
++ r3 = interrupt;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3)
++ : : EV_HCALL_CLOBBERS1
++ );
++
++ return r3;
++}
++
++/**
++ * ev_byte_channel_send - send characters to a byte stream
++ * @handle: byte stream handle
++ * @count: (input) num of chars to send, (output) num chars sent
++ * @buffer: pointer to a 16-byte buffer
++ *
++ * @buffer must be at least 16 bytes long, because all 16 bytes will be
++ * read from memory into registers, even if count < 16.
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_byte_channel_send(unsigned int handle,
++ unsigned int *count, const char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++ register uintptr_t r5 __asm__("r5");
++ register uintptr_t r6 __asm__("r6");
++ register uintptr_t r7 __asm__("r7");
++ register uintptr_t r8 __asm__("r8");
++ const uint32_t *p = (const uint32_t *) buffer;
++
++ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_SEND);
++ r3 = handle;
++ r4 = *count;
++ r5 = be32_to_cpu(p[0]);
++ r6 = be32_to_cpu(p[1]);
++ r7 = be32_to_cpu(p[2]);
++ r8 = be32_to_cpu(p[3]);
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3),
++ "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7), "+r" (r8)
++ : : EV_HCALL_CLOBBERS6
++ );
++
++ *count = r4;
++
++ return r3;
++}
++
++/**
++ * ev_byte_channel_receive - fetch characters from a byte channel
++ * @handle: byte channel handle
++ * @count: (input) max num of chars to receive, (output) num chars received
++ * @buffer: pointer to a 16-byte buffer
++ *
++ * The size of @buffer must be at least 16 bytes, even if you request fewer
++ * than 16 characters, because we always write 16 bytes to @buffer. This is
++ * for performance reasons.
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_byte_channel_receive(unsigned int handle,
++ unsigned int *count, char buffer[EV_BYTE_CHANNEL_MAX_BYTES])
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++ register uintptr_t r5 __asm__("r5");
++ register uintptr_t r6 __asm__("r6");
++ register uintptr_t r7 __asm__("r7");
++ register uintptr_t r8 __asm__("r8");
++ uint32_t *p = (uint32_t *) buffer;
++
++ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_RECEIVE);
++ r3 = handle;
++ r4 = *count;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3), "+r" (r4),
++ "=r" (r5), "=r" (r6), "=r" (r7), "=r" (r8)
++ : : EV_HCALL_CLOBBERS6
++ );
++
++ *count = r4;
++ p[0] = cpu_to_be32(r5);
++ p[1] = cpu_to_be32(r6);
++ p[2] = cpu_to_be32(r7);
++ p[3] = cpu_to_be32(r8);
++
++ return r3;
++}
++
++/**
++ * ev_byte_channel_poll - returns the status of the byte channel buffers
++ * @handle: byte channel handle
++ * @rx_count: returned count of bytes in receive queue
++ * @tx_count: returned count of free space in transmit queue
++ *
++ * This function reports the amount of data in the receive queue (i.e. the
++ * number of bytes you can read), and the amount of free space in the transmit
++ * queue (i.e. the number of bytes you can write).
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_byte_channel_poll(unsigned int handle,
++ unsigned int *rx_count, unsigned int *tx_count)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++ register uintptr_t r5 __asm__("r5");
++
++ r11 = EV_HCALL_TOKEN(EV_BYTE_CHANNEL_POLL);
++ r3 = handle;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3), "=r" (r4), "=r" (r5)
++ : : EV_HCALL_CLOBBERS3
++ );
++
++ *rx_count = r4;
++ *tx_count = r5;
++
++ return r3;
++}
++
++/**
++ * ev_int_iack - acknowledge an interrupt
++ * @handle: handle to the target interrupt controller
++ * @vector: returned interrupt vector
++ *
++ * If handle is zero, the function returns the next interrupt source
++ * number to be handled irrespective of the hierarchy or cascading
++ * of interrupt controllers. If non-zero, specifies a handle to the
++ * interrupt controller that is the target of the acknowledge.
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_int_iack(unsigned int handle,
++ unsigned int *vector)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++ register uintptr_t r4 __asm__("r4");
++
++ r11 = EV_HCALL_TOKEN(EV_INT_IACK);
++ r3 = handle;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3), "=r" (r4)
++ : : EV_HCALL_CLOBBERS2
++ );
++
++ *vector = r4;
++
++ return r3;
++}
++
++/**
++ * ev_doorbell_send - send a doorbell to another partition
++ * @handle: doorbell send handle
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_doorbell_send(unsigned int handle)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++
++ r11 = EV_HCALL_TOKEN(EV_DOORBELL_SEND);
++ r3 = handle;
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "+r" (r3)
++ : : EV_HCALL_CLOBBERS1
++ );
++
++ return r3;
++}
++
++/**
++ * ev_idle -- wait for next interrupt on this core
++ *
++ * Returns 0 for success, or an error code.
++ */
++static inline unsigned int ev_idle(void)
++{
++ register uintptr_t r11 __asm__("r11");
++ register uintptr_t r3 __asm__("r3");
++
++ r11 = EV_HCALL_TOKEN(EV_IDLE);
++
++ asm volatile("bl epapr_hypercall_start"
++ : "+r" (r11), "=r" (r3)
++ : : EV_HCALL_CLOBBERS1
++ );
++
++ return r3;
++}
++#endif /* !__ASSEMBLY__ */
++#endif
--- /dev/null
+ /*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+ #ifndef __LINUX_KVM_POWERPC_H
+ #define __LINUX_KVM_POWERPC_H
+
+ #include <linux/types.h>
+
+ /* Select powerpc specific features in <linux/kvm.h> */
+ #define __KVM_HAVE_SPAPR_TCE
+ #define __KVM_HAVE_PPC_SMT
+
+ struct kvm_regs {
+ __u64 pc;
+ __u64 cr;
+ __u64 ctr;
+ __u64 lr;
+ __u64 xer;
+ __u64 msr;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 pid;
+
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+
+ __u64 gpr[32];
+ };
+
+ #define KVM_SREGS_E_IMPL_NONE 0
+ #define KVM_SREGS_E_IMPL_FSL 1
+
+ #define KVM_SREGS_E_FSL_PIDn (1 << 0) /* PID1/PID2 */
+
+ /*
+ * Feature bits indicate which sections of the sregs struct are valid,
+ * both in KVM_GET_SREGS and KVM_SET_SREGS. On KVM_SET_SREGS, registers
+ * corresponding to unset feature bits will not be modified. This allows
+ * restoring a checkpoint made without that feature, while keeping the
+ * default values of the new registers.
+ *
+ * KVM_SREGS_E_BASE contains:
+ * CSRR0/1 (refers to SRR2/3 on 40x)
+ * ESR
+ * DEAR
+ * MCSR
+ * TSR
+ * TCR
+ * DEC
+ * TB
+ * VRSAVE (USPRG0)
+ */
+ #define KVM_SREGS_E_BASE (1 << 0)
+
+ /*
+ * KVM_SREGS_E_ARCH206 contains:
+ *
+ * PIR
+ * MCSRR0/1
+ * DECAR
+ * IVPR
+ */
+ #define KVM_SREGS_E_ARCH206 (1 << 1)
+
+ /*
+ * Contains EPCR, plus the upper half of 64-bit registers
+ * that are 32-bit on 32-bit implementations.
+ */
+ #define KVM_SREGS_E_64 (1 << 2)
+
+ #define KVM_SREGS_E_SPRG8 (1 << 3)
+ #define KVM_SREGS_E_MCIVPR (1 << 4)
+
+ /*
+ * IVORs are used -- contains IVOR0-15, plus additional IVORs
+ * in combination with an appropriate feature bit.
+ */
+ #define KVM_SREGS_E_IVOR (1 << 5)
+
+ /*
+ * Contains MAS0-4, MAS6-7, TLBnCFG, MMUCFG.
+ * Also TLBnPS if MMUCFG[MAVN] = 1.
+ */
+ #define KVM_SREGS_E_ARCH206_MMU (1 << 6)
+
+ /* DBSR, DBCR, IAC, DAC, DVC */
+ #define KVM_SREGS_E_DEBUG (1 << 7)
+
+ /* Enhanced debug -- DSRR0/1, SPRG9 */
+ #define KVM_SREGS_E_ED (1 << 8)
+
+ /* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */
+ #define KVM_SREGS_E_SPE (1 << 9)
+
+ /* External Proxy (EXP) -- EPR */
+ #define KVM_SREGS_EXP (1 << 10)
+
+ /* External PID (E.PD) -- EPSC/EPLC */
+ #define KVM_SREGS_E_PD (1 << 11)
+
+ /* Processor Control (E.PC) -- IVOR36-37 if KVM_SREGS_E_IVOR */
+ #define KVM_SREGS_E_PC (1 << 12)
+
+ /* Page table (E.PT) -- EPTCFG */
+ #define KVM_SREGS_E_PT (1 << 13)
+
+ /* Embedded Performance Monitor (E.PM) -- IVOR35 if KVM_SREGS_E_IVOR */
+ #define KVM_SREGS_E_PM (1 << 14)
+
+ /*
+ * Special updates:
+ *
+ * Some registers may change even while a vcpu is not running.
+ * To avoid losing these changes, by default these registers are
+ * not updated by KVM_SET_SREGS. To force an update, set the bit
+ * in u.e.update_special corresponding to the register to be updated.
+ *
+ * The update_special field is zero on return from KVM_GET_SREGS.
+ *
+ * When restoring a checkpoint, the caller can set update_special
+ * to 0xffffffff to ensure that everything is restored, even new features
+ * that the caller doesn't know about.
+ */
+ #define KVM_SREGS_E_UPDATE_MCSR (1 << 0)
+ #define KVM_SREGS_E_UPDATE_TSR (1 << 1)
+ #define KVM_SREGS_E_UPDATE_DEC (1 << 2)
+ #define KVM_SREGS_E_UPDATE_DBSR (1 << 3)
+
+ /*
+ * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
+ * previous KVM_GET_REGS.
+ *
+ * Unless otherwise indicated, setting any register with KVM_SET_SREGS
+ * directly sets its value. It does not trigger any special semantics such
+ * as write-one-to-clear. Calling KVM_SET_SREGS on an unmodified struct
+ * just received from KVM_GET_SREGS is always a no-op.
+ */
+ struct kvm_sregs {
+ __u32 pvr;
+ union {
+ struct {
+ __u64 sdr1;
+ struct {
+ struct {
+ __u64 slbe;
+ __u64 slbv;
+ } slb[64];
+ } ppc64;
+ struct {
+ __u32 sr[16];
+ __u64 ibat[8];
+ __u64 dbat[8];
+ } ppc32;
+ } s;
+ struct {
+ union {
+ struct { /* KVM_SREGS_E_IMPL_FSL */
+ __u32 features; /* KVM_SREGS_E_FSL_ */
+ __u32 svr;
+ __u64 mcar;
+ __u32 hid0;
+
+ /* KVM_SREGS_E_FSL_PIDn */
+ __u32 pid1, pid2;
+ } fsl;
+ __u8 pad[256];
+ } impl;
+
+ __u32 features; /* KVM_SREGS_E_ */
+ __u32 impl_id; /* KVM_SREGS_E_IMPL_ */
+ __u32 update_special; /* KVM_SREGS_E_UPDATE_ */
+ __u32 pir; /* read-only */
+ __u64 sprg8;
+ __u64 sprg9; /* E.ED */
+ __u64 csrr0;
+ __u64 dsrr0; /* E.ED */
+ __u64 mcsrr0;
+ __u32 csrr1;
+ __u32 dsrr1; /* E.ED */
+ __u32 mcsrr1;
+ __u32 esr;
+ __u64 dear;
+ __u64 ivpr;
+ __u64 mcivpr;
+ __u64 mcsr; /* KVM_SREGS_E_UPDATE_MCSR */
+
+ __u32 tsr; /* KVM_SREGS_E_UPDATE_TSR */
+ __u32 tcr;
+ __u32 decar;
+ __u32 dec; /* KVM_SREGS_E_UPDATE_DEC */
+
+ /*
+ * Userspace can read TB directly, but the
+ * value reported here is consistent with "dec".
+ *
+ * Read-only.
+ */
+ __u64 tb;
+
+ __u32 dbsr; /* KVM_SREGS_E_UPDATE_DBSR */
+ __u32 dbcr[3];
++ /*
++ * iac/dac registers are 64bit wide, while this API
++ * interface provides only lower 32 bits on 64 bit
++ * processors. ONE_REG interface is added for 64bit
++ * iac/dac registers.
++ */
+ __u32 iac[4];
+ __u32 dac[2];
+ __u32 dvc[2];
+ __u8 num_iac; /* read-only */
+ __u8 num_dac; /* read-only */
+ __u8 num_dvc; /* read-only */
+ __u8 pad;
+
+ __u32 epr; /* EXP */
+ __u32 vrsave; /* a.k.a. USPRG0 */
+ __u32 epcr; /* KVM_SREGS_E_64 */
+
+ __u32 mas0;
+ __u32 mas1;
+ __u64 mas2;
+ __u64 mas7_3;
+ __u32 mas4;
+ __u32 mas6;
+
+ __u32 ivor_low[16]; /* IVOR0-15 */
+ __u32 ivor_high[18]; /* IVOR32+, plus room to expand */
+
+ __u32 mmucfg; /* read-only */
+ __u32 eptcfg; /* E.PT, read-only */
+ __u32 tlbcfg[4];/* read-only */
+ __u32 tlbps[4]; /* read-only */
+
+ __u32 eplc, epsc; /* E.PD */
+ } e;
+ __u8 pad[1020];
+ } u;
+ };
+
+ struct kvm_fpu {
+ __u64 fpr[32];
+ };
+
+ struct kvm_debug_exit_arch {
+ };
+
+ /* for KVM_SET_GUEST_DEBUG */
+ struct kvm_guest_debug_arch {
+ };
+
+ /* definition of registers in kvm_run */
+ struct kvm_sync_regs {
+ };
+
+ #define KVM_INTERRUPT_SET -1U
+ #define KVM_INTERRUPT_UNSET -2U
+ #define KVM_INTERRUPT_SET_LEVEL -3U
+
+ #define KVM_CPU_440 1
+ #define KVM_CPU_E500V2 2
+ #define KVM_CPU_3S_32 3
+ #define KVM_CPU_3S_64 4
+ #define KVM_CPU_E500MC 5
+
+ /* for KVM_CAP_SPAPR_TCE */
+ struct kvm_create_spapr_tce {
+ __u64 liobn;
+ __u32 window_size;
+ };
+
+ /* for KVM_ALLOCATE_RMA */
+ struct kvm_allocate_rma {
+ __u64 rma_size;
+ };
+
+ struct kvm_book3e_206_tlb_entry {
+ __u32 mas8;
+ __u32 mas1;
+ __u64 mas2;
+ __u64 mas7_3;
+ };
+
+ struct kvm_book3e_206_tlb_params {
+ /*
+ * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
+ *
+ * - The number of ways of TLB0 must be a power of two between 2 and
+ * 16.
+ * - TLB1 must be fully associative.
+ * - The size of TLB0 must be a multiple of the number of ways, and
+ * the number of sets must be a power of two.
+ * - The size of TLB1 may not exceed 64 entries.
+ * - TLB0 supports 4 KiB pages.
+ * - The page sizes supported by TLB1 are as indicated by
+ * TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
+ * as returned by KVM_GET_SREGS.
+ * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
+ * and tlb_ways[] must be zero.
+ *
+ * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
+ *
+ * KVM will adjust TLBnCFG based on the sizes configured here,
+ * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
+ * set to zero.
+ */
+ __u32 tlb_sizes[4];
+ __u32 tlb_ways[4];
+ __u32 reserved[8];
+ };
+
+ #define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
++#define KVM_REG_PPC_IAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
++#define KVM_REG_PPC_IAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
++#define KVM_REG_PPC_IAC3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4)
++#define KVM_REG_PPC_IAC4 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5)
++#define KVM_REG_PPC_DAC1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6)
++#define KVM_REG_PPC_DAC2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7)
++#define KVM_REG_PPC_DABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8)
++#define KVM_REG_PPC_DSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9)
++#define KVM_REG_PPC_PURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa)
++#define KVM_REG_PPC_SPURR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb)
++#define KVM_REG_PPC_DAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc)
++#define KVM_REG_PPC_DSISR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd)
++#define KVM_REG_PPC_AMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe)
++#define KVM_REG_PPC_UAMOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf)
++
++#define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
++#define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
++#define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
++
++#define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
++#define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
++#define KVM_REG_PPC_PMC3 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a)
++#define KVM_REG_PPC_PMC4 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b)
++#define KVM_REG_PPC_PMC5 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c)
++#define KVM_REG_PPC_PMC6 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d)
++#define KVM_REG_PPC_PMC7 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e)
++#define KVM_REG_PPC_PMC8 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f)
++
++/* 32 floating-point registers */
++#define KVM_REG_PPC_FPR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20)
++#define KVM_REG_PPC_FPR(n) (KVM_REG_PPC_FPR0 + (n))
++#define KVM_REG_PPC_FPR31 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f)
++
++/* 32 VMX/Altivec vector registers */
++#define KVM_REG_PPC_VR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40)
++#define KVM_REG_PPC_VR(n) (KVM_REG_PPC_VR0 + (n))
++#define KVM_REG_PPC_VR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f)
++
++/* 32 double-width FP registers for VSX */
++/* High-order halves overlap with FP regs */
++#define KVM_REG_PPC_VSR0 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60)
++#define KVM_REG_PPC_VSR(n) (KVM_REG_PPC_VSR0 + (n))
++#define KVM_REG_PPC_VSR31 (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f)
++
++/* FP and vector status/control registers */
++#define KVM_REG_PPC_FPSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
++#define KVM_REG_PPC_VSCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
++
++/* Virtual processor areas */
++/* For SLB & DTL, address in high (first) half, length in low half */
++#define KVM_REG_PPC_VPA_ADDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82)
++#define KVM_REG_PPC_VPA_SLB (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
++#define KVM_REG_PPC_VPA_DTL (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
+
+ #endif /* __LINUX_KVM_POWERPC_H */
--- /dev/null
-#define HC_VENDOR_KVM (42 << 16)
-#define HC_EV_SUCCESS 0
-#define HC_EV_UNIMPLEMENTED 12
+ /*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+ #ifndef _UAPI__POWERPC_KVM_PARA_H__
+ #define _UAPI__POWERPC_KVM_PARA_H__
+
+ #include <linux/types.h>
+
+ /*
+ * Additions to this struct must only occur at the end, and should be
+ * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
+ * (albeit not necessarily relevant to the current target hardware platform).
+ *
+ * Struct fields are always 32 or 64 bit aligned, depending on them being 32
+ * or 64 bit wide respectively.
+ *
+ * See Documentation/virtual/kvm/ppc-pv.txt
+ */
+ struct kvm_vcpu_arch_shared {
+ __u64 scratch1;
+ __u64 scratch2;
+ __u64 scratch3;
+ __u64 critical; /* Guest may not get interrupts if == r1 */
+ __u64 sprg0;
+ __u64 sprg1;
+ __u64 sprg2;
+ __u64 sprg3;
+ __u64 srr0;
+ __u64 srr1;
+ __u64 dar; /* dear on BookE */
+ __u64 msr;
+ __u32 dsisr;
+ __u32 int_pending; /* Tells the guest if we have an interrupt */
+ __u32 sr[16];
+ __u32 mas0;
+ __u32 mas1;
+ __u64 mas7_3;
+ __u64 mas2;
+ __u32 mas4;
+ __u32 mas6;
+ __u32 esr;
+ __u32 pir;
+
+ /*
+ * SPRG4-7 are user-readable, so we can only keep these consistent
+ * between the shared area and the real registers when there's an
+ * intervening exit to KVM. This also applies to SPRG3 on some
+ * chips.
+ *
+ * This suffices for access by guest userspace, since in PR-mode
+ * KVM, an exit must occur when changing the guest's MSR[PR].
+ * If the guest kernel writes to SPRG3-7 via the shared area, it
+ * must also use the shared area for reading while in kernel space.
+ */
+ __u64 sprg4;
+ __u64 sprg5;
+ __u64 sprg6;
+ __u64 sprg7;
+ };
+
+ #define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
++
++#define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
++
++#include <uapi/asm/epapr_hcalls.h>
+
+ #define KVM_FEATURE_MAGIC_PAGE 1
+
+ #define KVM_MAGIC_FEAT_SR (1 << 0)
+
+ /* MASn, ESR, PIR, and high SPRGs */
+ #define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1)
+
+
+ #endif /* _UAPI__POWERPC_KVM_PARA_H__ */
--- /dev/null
-#define KVM_INTERNAL_ERROR_EMULATION 1
-#define KVM_INTERNAL_ERROR_SIMUL_EX 2
+ #ifndef __LINUX_KVM_H
+ #define __LINUX_KVM_H
+
+ /*
+ * Userspace interface for /dev/kvm - kernel based virtual machine
+ *
+ * Note: you must update KVM_API_VERSION if you change this interface.
+ */
+
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ #include <linux/ioctl.h>
+ #include <asm/kvm.h>
+
+ #define KVM_API_VERSION 12
+
+ /* *** Deprecated interfaces *** */
+
+ #define KVM_TRC_SHIFT 16
+
+ #define KVM_TRC_ENTRYEXIT (1 << KVM_TRC_SHIFT)
+ #define KVM_TRC_HANDLER (1 << (KVM_TRC_SHIFT + 1))
+
+ #define KVM_TRC_VMENTRY (KVM_TRC_ENTRYEXIT + 0x01)
+ #define KVM_TRC_VMEXIT (KVM_TRC_ENTRYEXIT + 0x02)
+ #define KVM_TRC_PAGE_FAULT (KVM_TRC_HANDLER + 0x01)
+
+ #define KVM_TRC_HEAD_SIZE 12
+ #define KVM_TRC_CYCLE_SIZE 8
+ #define KVM_TRC_EXTRA_MAX 7
+
+ #define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
+ #define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
+ #define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
+ #define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
+ #define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
+ #define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
+ #define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
+ #define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
+ #define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
+ #define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
+ #define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
+ #define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
+ #define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
+ #define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
+ #define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
+ #define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
+ #define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
+ #define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
+ #define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
+ #define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
+ #define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
+ #define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
+ #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
+ #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
+
+ struct kvm_user_trace_setup {
+ __u32 buf_size;
+ __u32 buf_nr;
+ };
+
+ #define __KVM_DEPRECATED_MAIN_W_0x06 \
+ _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
+ #define __KVM_DEPRECATED_MAIN_0x07 _IO(KVMIO, 0x07)
+ #define __KVM_DEPRECATED_MAIN_0x08 _IO(KVMIO, 0x08)
+
+ #define __KVM_DEPRECATED_VM_R_0x70 _IOR(KVMIO, 0x70, struct kvm_assigned_irq)
+
+ struct kvm_breakpoint {
+ __u32 enabled;
+ __u32 padding;
+ __u64 address;
+ };
+
+ struct kvm_debug_guest {
+ __u32 enabled;
+ __u32 pad;
+ struct kvm_breakpoint breakpoints[4];
+ __u32 singlestep;
+ };
+
+ #define __KVM_DEPRECATED_VCPU_W_0x87 _IOW(KVMIO, 0x87, struct kvm_debug_guest)
+
+ /* *** End of deprecated interfaces *** */
+
+
+ /* for KVM_CREATE_MEMORY_REGION */
+ struct kvm_memory_region {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+ };
+
+ /* for KVM_SET_USER_MEMORY_REGION */
+ struct kvm_userspace_memory_region {
+ __u32 slot;
+ __u32 flags;
+ __u64 guest_phys_addr;
+ __u64 memory_size; /* bytes */
+ __u64 userspace_addr; /* start of the userspace allocated memory */
+ };
+
+ /*
+ * The bit 0 ~ bit 15 of kvm_memory_region::flags are visible for userspace,
+ * other bits are reserved for kvm internal use which are defined in
+ * include/linux/kvm_host.h.
+ */
+ #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
+ #define KVM_MEM_READONLY (1UL << 1)
+
+ /* for KVM_IRQ_LINE */
+ struct kvm_irq_level {
+ /*
+ * ACPI gsi notion of irq.
+ * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
+ * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
+ */
+ union {
+ __u32 irq;
+ __s32 status;
+ };
+ __u32 level;
+ };
+
+
+ struct kvm_irqchip {
+ __u32 chip_id;
+ __u32 pad;
+ union {
+ char dummy[512]; /* reserving space */
+ #ifdef __KVM_HAVE_PIT
+ struct kvm_pic_state pic;
+ #endif
+ #ifdef __KVM_HAVE_IOAPIC
+ struct kvm_ioapic_state ioapic;
+ #endif
+ } chip;
+ };
+
+ /* for KVM_CREATE_PIT2 */
+ struct kvm_pit_config {
+ __u32 flags;
+ __u32 pad[15];
+ };
+
+ #define KVM_PIT_SPEAKER_DUMMY 1
+
+ #define KVM_EXIT_UNKNOWN 0
+ #define KVM_EXIT_EXCEPTION 1
+ #define KVM_EXIT_IO 2
+ #define KVM_EXIT_HYPERCALL 3
+ #define KVM_EXIT_DEBUG 4
+ #define KVM_EXIT_HLT 5
+ #define KVM_EXIT_MMIO 6
+ #define KVM_EXIT_IRQ_WINDOW_OPEN 7
+ #define KVM_EXIT_SHUTDOWN 8
+ #define KVM_EXIT_FAIL_ENTRY 9
+ #define KVM_EXIT_INTR 10
+ #define KVM_EXIT_SET_TPR 11
+ #define KVM_EXIT_TPR_ACCESS 12
+ #define KVM_EXIT_S390_SIEIC 13
+ #define KVM_EXIT_S390_RESET 14
+ #define KVM_EXIT_DCR 15
+ #define KVM_EXIT_NMI 16
+ #define KVM_EXIT_INTERNAL_ERROR 17
+ #define KVM_EXIT_OSI 18
+ #define KVM_EXIT_PAPR_HCALL 19
+ #define KVM_EXIT_S390_UCONTROL 20
++#define KVM_EXIT_WATCHDOG 21
+
+ /* For KVM_EXIT_INTERNAL_ERROR */
-#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
-/* Available with KVM_CAP_RMA */
-#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
++/* Emulate instruction failed. */
++#define KVM_INTERNAL_ERROR_EMULATION 1
++/* Encounter unexpected simultaneous exceptions. */
++#define KVM_INTERNAL_ERROR_SIMUL_EX 2
++/* Encounter unexpected vm-exit due to delivery event. */
++#define KVM_INTERNAL_ERROR_DELIVERY_EV 3
+
+ /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
+ struct kvm_run {
+ /* in */
+ __u8 request_interrupt_window;
+ __u8 padding1[7];
+
+ /* out */
+ __u32 exit_reason;
+ __u8 ready_for_interrupt_injection;
+ __u8 if_flag;
+ __u8 padding2[2];
+
+ /* in (pre_kvm_run), out (post_kvm_run) */
+ __u64 cr8;
+ __u64 apic_base;
+
+ #ifdef __KVM_S390
+ /* the processor status word for s390 */
+ __u64 psw_mask; /* psw upper half */
+ __u64 psw_addr; /* psw lower half */
+ #endif
+ union {
+ /* KVM_EXIT_UNKNOWN */
+ struct {
+ __u64 hardware_exit_reason;
+ } hw;
+ /* KVM_EXIT_FAIL_ENTRY */
+ struct {
+ __u64 hardware_entry_failure_reason;
+ } fail_entry;
+ /* KVM_EXIT_EXCEPTION */
+ struct {
+ __u32 exception;
+ __u32 error_code;
+ } ex;
+ /* KVM_EXIT_IO */
+ struct {
+ #define KVM_EXIT_IO_IN 0
+ #define KVM_EXIT_IO_OUT 1
+ __u8 direction;
+ __u8 size; /* bytes */
+ __u16 port;
+ __u32 count;
+ __u64 data_offset; /* relative to kvm_run start */
+ } io;
+ struct {
+ struct kvm_debug_exit_arch arch;
+ } debug;
+ /* KVM_EXIT_MMIO */
+ struct {
+ __u64 phys_addr;
+ __u8 data[8];
+ __u32 len;
+ __u8 is_write;
+ } mmio;
+ /* KVM_EXIT_HYPERCALL */
+ struct {
+ __u64 nr;
+ __u64 args[6];
+ __u64 ret;
+ __u32 longmode;
+ __u32 pad;
+ } hypercall;
+ /* KVM_EXIT_TPR_ACCESS */
+ struct {
+ __u64 rip;
+ __u32 is_write;
+ __u32 pad;
+ } tpr_access;
+ /* KVM_EXIT_S390_SIEIC */
+ struct {
+ __u8 icptcode;
+ __u16 ipa;
+ __u32 ipb;
+ } s390_sieic;
+ /* KVM_EXIT_S390_RESET */
+ #define KVM_S390_RESET_POR 1
+ #define KVM_S390_RESET_CLEAR 2
+ #define KVM_S390_RESET_SUBSYSTEM 4
+ #define KVM_S390_RESET_CPU_INIT 8
+ #define KVM_S390_RESET_IPL 16
+ __u64 s390_reset_flags;
+ /* KVM_EXIT_S390_UCONTROL */
+ struct {
+ __u64 trans_exc_code;
+ __u32 pgm_code;
+ } s390_ucontrol;
+ /* KVM_EXIT_DCR */
+ struct {
+ __u32 dcrn;
+ __u32 data;
+ __u8 is_write;
+ } dcr;
+ struct {
+ __u32 suberror;
+ /* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
+ __u32 ndata;
+ __u64 data[16];
+ } internal;
+ /* KVM_EXIT_OSI */
+ struct {
+ __u64 gprs[32];
+ } osi;
+ struct {
+ __u64 nr;
+ __u64 ret;
+ __u64 args[9];
+ } papr_hcall;
+ /* Fix the size of the union. */
+ char padding[256];
+ };
+
+ /*
+ * shared registers between kvm and userspace.
+ * kvm_valid_regs specifies the register classes set by the host
+ * kvm_dirty_regs specified the register classes dirtied by userspace
+ * struct kvm_sync_regs is architecture specific, as well as the
+ * bits for kvm_valid_regs and kvm_dirty_regs
+ */
+ __u64 kvm_valid_regs;
+ __u64 kvm_dirty_regs;
+ union {
+ struct kvm_sync_regs regs;
+ char padding[1024];
+ } s;
+ };
+
+ /* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */
+
+ struct kvm_coalesced_mmio_zone {
+ __u64 addr;
+ __u32 size;
+ __u32 pad;
+ };
+
+ struct kvm_coalesced_mmio {
+ __u64 phys_addr;
+ __u32 len;
+ __u32 pad;
+ __u8 data[8];
+ };
+
+ struct kvm_coalesced_mmio_ring {
+ __u32 first, last;
+ struct kvm_coalesced_mmio coalesced_mmio[0];
+ };
+
+ #define KVM_COALESCED_MMIO_MAX \
+ ((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
+ sizeof(struct kvm_coalesced_mmio))
+
+ /* for KVM_TRANSLATE */
+ struct kvm_translation {
+ /* in */
+ __u64 linear_address;
+
+ /* out */
+ __u64 physical_address;
+ __u8 valid;
+ __u8 writeable;
+ __u8 usermode;
+ __u8 pad[5];
+ };
+
+ /* for KVM_INTERRUPT */
+ struct kvm_interrupt {
+ /* in */
+ __u32 irq;
+ };
+
+ /* for KVM_GET_DIRTY_LOG */
+ struct kvm_dirty_log {
+ __u32 slot;
+ __u32 padding1;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+ };
+
+ /* for KVM_SET_SIGNAL_MASK */
+ struct kvm_signal_mask {
+ __u32 len;
+ __u8 sigset[0];
+ };
+
+ /* for KVM_TPR_ACCESS_REPORTING */
+ struct kvm_tpr_access_ctl {
+ __u32 enabled;
+ __u32 flags;
+ __u32 reserved[8];
+ };
+
+ /* for KVM_SET_VAPIC_ADDR */
+ struct kvm_vapic_addr {
+ __u64 vapic_addr;
+ };
+
+ /* for KVM_SET_MPSTATE */
+
+ #define KVM_MP_STATE_RUNNABLE 0
+ #define KVM_MP_STATE_UNINITIALIZED 1
+ #define KVM_MP_STATE_INIT_RECEIVED 2
+ #define KVM_MP_STATE_HALTED 3
+ #define KVM_MP_STATE_SIPI_RECEIVED 4
+
+ struct kvm_mp_state {
+ __u32 mp_state;
+ };
+
+ struct kvm_s390_psw {
+ __u64 mask;
+ __u64 addr;
+ };
+
+ /* valid values for type in kvm_s390_interrupt */
+ #define KVM_S390_SIGP_STOP 0xfffe0000u
+ #define KVM_S390_PROGRAM_INT 0xfffe0001u
+ #define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
+ #define KVM_S390_RESTART 0xfffe0003u
+ #define KVM_S390_INT_VIRTIO 0xffff2603u
+ #define KVM_S390_INT_SERVICE 0xffff2401u
+ #define KVM_S390_INT_EMERGENCY 0xffff1201u
+ #define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
+
+ struct kvm_s390_interrupt {
+ __u32 type;
+ __u32 parm;
+ __u64 parm64;
+ };
+
+ /* for KVM_SET_GUEST_DEBUG */
+
+ #define KVM_GUESTDBG_ENABLE 0x00000001
+ #define KVM_GUESTDBG_SINGLESTEP 0x00000002
+
+ struct kvm_guest_debug {
+ __u32 control;
+ __u32 pad;
+ struct kvm_guest_debug_arch arch;
+ };
+
+ enum {
+ kvm_ioeventfd_flag_nr_datamatch,
+ kvm_ioeventfd_flag_nr_pio,
+ kvm_ioeventfd_flag_nr_deassign,
+ kvm_ioeventfd_flag_nr_max,
+ };
+
+ #define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
+ #define KVM_IOEVENTFD_FLAG_PIO (1 << kvm_ioeventfd_flag_nr_pio)
+ #define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign)
+
+ #define KVM_IOEVENTFD_VALID_FLAG_MASK ((1 << kvm_ioeventfd_flag_nr_max) - 1)
+
+ struct kvm_ioeventfd {
+ __u64 datamatch;
+ __u64 addr; /* legal pio/mmio address */
+ __u32 len; /* 1, 2, 4, or 8 bytes */
+ __s32 fd;
+ __u32 flags;
+ __u8 pad[36];
+ };
+
+ /* for KVM_ENABLE_CAP */
+ struct kvm_enable_cap {
+ /* in */
+ __u32 cap;
+ __u32 flags;
+ __u64 args[4];
+ __u8 pad[64];
+ };
+
+ /* for KVM_PPC_GET_PVINFO */
+ struct kvm_ppc_pvinfo {
+ /* out */
+ __u32 flags;
+ __u32 hcall[4];
+ __u8 pad[108];
+ };
+
+ /* for KVM_PPC_GET_SMMU_INFO */
+ #define KVM_PPC_PAGE_SIZES_MAX_SZ 8
+
+ struct kvm_ppc_one_page_size {
+ __u32 page_shift; /* Page shift (or 0) */
+ __u32 pte_enc; /* Encoding in the HPTE (>>12) */
+ };
+
+ struct kvm_ppc_one_seg_page_size {
+ __u32 page_shift; /* Base page shift of segment (or 0) */
+ __u32 slb_enc; /* SLB encoding for BookS */
+ struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+ };
+
+ #define KVM_PPC_PAGE_SIZES_REAL 0x00000001
+ #define KVM_PPC_1T_SEGMENTS 0x00000002
+
+ /* Result of KVM_PPC_GET_SMMU_INFO */
+ struct kvm_ppc_smmu_info {
+ __u64 flags; /* KVM_PPC_PAGE_SIZES_REAL, KVM_PPC_1T_SEGMENTS */
+ __u32 slb_size; /* number of SLB entries */
+ __u32 pad; /* explicit padding */
+ struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ]; /* supported segment page sizes */
+ };
+
++#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
++
+ #define KVMIO 0xAE
+
+ /* machine type bits, to be used as argument to KVM_CREATE_VM */
+ #define KVM_VM_S390_UCONTROL 1
+
+ #define KVM_S390_SIE_PAGE_OFFSET 1
+
+ /*
+ * ioctls for /dev/kvm fds:
+ */
+ #define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
+ #define KVM_CREATE_VM _IO(KVMIO, 0x01) /* returns a VM fd */
+ #define KVM_GET_MSR_INDEX_LIST _IOWR(KVMIO, 0x02, struct kvm_msr_list)
+
+ #define KVM_S390_ENABLE_SIE _IO(KVMIO, 0x06)
+ /*
+ * Check if a kvm extension is available. Argument is extension number,
+ * return is 1 (yes) or 0 (no, sorry).
+ */
+ #define KVM_CHECK_EXTENSION _IO(KVMIO, 0x03)
+ /*
+ * Get size for mmap(vcpu_fd)
+ */
+ #define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
+ #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
+ #define KVM_TRACE_ENABLE __KVM_DEPRECATED_MAIN_W_0x06
+ #define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
+ #define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
+
+ /*
+ * Extension capability list.
+ */
+ #define KVM_CAP_IRQCHIP 0
+ #define KVM_CAP_HLT 1
+ #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
+ #define KVM_CAP_USER_MEMORY 3
+ #define KVM_CAP_SET_TSS_ADDR 4
+ #define KVM_CAP_VAPIC 6
+ #define KVM_CAP_EXT_CPUID 7
+ #define KVM_CAP_CLOCKSOURCE 8
+ #define KVM_CAP_NR_VCPUS 9 /* returns recommended max vcpus per vm */
+ #define KVM_CAP_NR_MEMSLOTS 10 /* returns max memory slots per vm */
+ #define KVM_CAP_PIT 11
+ #define KVM_CAP_NOP_IO_DELAY 12
+ #define KVM_CAP_PV_MMU 13
+ #define KVM_CAP_MP_STATE 14
+ #define KVM_CAP_COALESCED_MMIO 15
+ #define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
+ #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+ #define KVM_CAP_DEVICE_ASSIGNMENT 17
+ #endif
+ #define KVM_CAP_IOMMU 18
+ #ifdef __KVM_HAVE_MSI
+ #define KVM_CAP_DEVICE_MSI 20
+ #endif
+ /* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
+ #define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
+ #ifdef __KVM_HAVE_USER_NMI
+ #define KVM_CAP_USER_NMI 22
+ #endif
+ #ifdef __KVM_HAVE_GUEST_DEBUG
+ #define KVM_CAP_SET_GUEST_DEBUG 23
+ #endif
+ #ifdef __KVM_HAVE_PIT
+ #define KVM_CAP_REINJECT_CONTROL 24
+ #endif
+ #ifdef __KVM_HAVE_IOAPIC
+ #define KVM_CAP_IRQ_ROUTING 25
+ #endif
+ #define KVM_CAP_IRQ_INJECT_STATUS 26
+ #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
+ #define KVM_CAP_DEVICE_DEASSIGNMENT 27
+ #endif
+ #ifdef __KVM_HAVE_MSIX
+ #define KVM_CAP_DEVICE_MSIX 28
+ #endif
+ #define KVM_CAP_ASSIGN_DEV_IRQ 29
+ /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
+ #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
+ #ifdef __KVM_HAVE_MCE
+ #define KVM_CAP_MCE 31
+ #endif
+ #define KVM_CAP_IRQFD 32
+ #ifdef __KVM_HAVE_PIT
+ #define KVM_CAP_PIT2 33
+ #endif
+ #define KVM_CAP_SET_BOOT_CPU_ID 34
+ #ifdef __KVM_HAVE_PIT_STATE2
+ #define KVM_CAP_PIT_STATE2 35
+ #endif
+ #define KVM_CAP_IOEVENTFD 36
+ #define KVM_CAP_SET_IDENTITY_MAP_ADDR 37
+ #ifdef __KVM_HAVE_XEN_HVM
+ #define KVM_CAP_XEN_HVM 38
+ #endif
+ #define KVM_CAP_ADJUST_CLOCK 39
+ #define KVM_CAP_INTERNAL_ERROR_DATA 40
+ #ifdef __KVM_HAVE_VCPU_EVENTS
+ #define KVM_CAP_VCPU_EVENTS 41
+ #endif
+ #define KVM_CAP_S390_PSW 42
+ #define KVM_CAP_PPC_SEGSTATE 43
+ #define KVM_CAP_HYPERV 44
+ #define KVM_CAP_HYPERV_VAPIC 45
+ #define KVM_CAP_HYPERV_SPIN 46
+ #define KVM_CAP_PCI_SEGMENT 47
+ #define KVM_CAP_PPC_PAIRED_SINGLES 48
+ #define KVM_CAP_INTR_SHADOW 49
+ #ifdef __KVM_HAVE_DEBUGREGS
+ #define KVM_CAP_DEBUGREGS 50
+ #endif
+ #define KVM_CAP_X86_ROBUST_SINGLESTEP 51
+ #define KVM_CAP_PPC_OSI 52
+ #define KVM_CAP_PPC_UNSET_IRQ 53
+ #define KVM_CAP_ENABLE_CAP 54
+ #ifdef __KVM_HAVE_XSAVE
+ #define KVM_CAP_XSAVE 55
+ #endif
+ #ifdef __KVM_HAVE_XCRS
+ #define KVM_CAP_XCRS 56
+ #endif
+ #define KVM_CAP_PPC_GET_PVINFO 57
+ #define KVM_CAP_PPC_IRQ_LEVEL 58
+ #define KVM_CAP_ASYNC_PF 59
+ #define KVM_CAP_TSC_CONTROL 60
+ #define KVM_CAP_GET_TSC_KHZ 61
+ #define KVM_CAP_PPC_BOOKE_SREGS 62
+ #define KVM_CAP_SPAPR_TCE 63
+ #define KVM_CAP_PPC_SMT 64
+ #define KVM_CAP_PPC_RMA 65
+ #define KVM_CAP_MAX_VCPUS 66 /* returns max vcpus per vm */
+ #define KVM_CAP_PPC_HIOR 67
+ #define KVM_CAP_PPC_PAPR 68
+ #define KVM_CAP_SW_TLB 69
+ #define KVM_CAP_ONE_REG 70
+ #define KVM_CAP_S390_GMAP 71
+ #define KVM_CAP_TSC_DEADLINE_TIMER 72
+ #define KVM_CAP_S390_UCONTROL 73
+ #define KVM_CAP_SYNC_REGS 74
+ #define KVM_CAP_PCI_2_3 75
+ #define KVM_CAP_KVMCLOCK_CTRL 76
+ #define KVM_CAP_SIGNAL_MSI 77
+ #define KVM_CAP_PPC_GET_SMMU_INFO 78
+ #define KVM_CAP_S390_COW 79
+ #define KVM_CAP_PPC_ALLOC_HTAB 80
+ #ifdef __KVM_HAVE_READONLY_MEM
+ #define KVM_CAP_READONLY_MEM 81
+ #endif
+ #define KVM_CAP_IRQFD_RESAMPLE 82
++#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
+
+ #ifdef KVM_CAP_IRQ_ROUTING
+
+ /* Route a GSI to a pin on an emulated irqchip */
+ struct kvm_irq_routing_irqchip {
+ __u32 irqchip; /* irqchip id */
+ __u32 pin; /* pin on that irqchip */
+ };
+
+ /* Route a GSI to an MSI message */
+ struct kvm_irq_routing_msi {
+ __u32 address_lo; /* low 32 bits of MSI address */
+ __u32 address_hi; /* high 32 bits of MSI address */
+ __u32 data; /* MSI data payload */
+ __u32 pad; /* explicit padding */
+ };
+
+ /* gsi routing entry types */
+ #define KVM_IRQ_ROUTING_IRQCHIP 1
+ #define KVM_IRQ_ROUTING_MSI 2
+
+ /* One GSI routing entry; 'type' selects the active union member */
+ struct kvm_irq_routing_entry {
+ __u32 gsi; /* global system interrupt number being routed */
+ __u32 type; /* KVM_IRQ_ROUTING_IRQCHIP or KVM_IRQ_ROUTING_MSI */
+ __u32 flags; /* NOTE(review): no flags defined here -- presumably must be 0 */
+ __u32 pad;
+ union {
+ struct kvm_irq_routing_irqchip irqchip;
+ struct kvm_irq_routing_msi msi;
+ __u32 pad[8]; /* sizes the union for future entry types */
+ } u;
+ };
+
+ /* Header for KVM_SET_GSI_ROUTING; immediately followed by 'nr' entries */
+ struct kvm_irq_routing {
+ __u32 nr; /* number of entries in the trailing array */
+ __u32 flags;
+ struct kvm_irq_routing_entry entries[0]; /* variable-length tail (pre-C99 flexible array idiom) */
+ };
+
+ #endif
+
+ #ifdef KVM_CAP_MCE
+ /* x86 MCE */
+ /* x86 MCE: machine-check event injected via KVM_X86_SET_MCE */
+ struct kvm_x86_mce {
+ __u64 status; /* MCi_STATUS value -- field names mirror the x86 MCE MSRs */
+ __u64 addr; /* MCi_ADDR */
+ __u64 misc; /* MCi_MISC */
+ __u64 mcg_status; /* MCG_STATUS */
+ __u8 bank; /* machine-check bank number */
+ __u8 pad1[7]; /* explicit padding to an 8-byte boundary */
+ __u64 pad2[3]; /* reserved */
+ };
+ #endif
+
+ #ifdef KVM_CAP_XEN_HVM
+ /* Argument for KVM_XEN_HVM_CONFIG: describes the Xen hypercall page blobs */
+ struct kvm_xen_hvm_config {
+ __u32 flags;
+ __u32 msr; /* MSR the guest uses to request the hypercall blob */
+ __u64 blob_addr_32; /* userspace address of the 32-bit blob */
+ __u64 blob_addr_64; /* userspace address of the 64-bit blob */
+ __u8 blob_size_32; /* size of the 32-bit blob -- units not shown here; see api.txt */
+ __u8 blob_size_64; /* size of the 64-bit blob */
+ __u8 pad2[30]; /* reserved */
+ };
+ #endif
+
+ #define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
+ /*
+ * Available with KVM_CAP_IRQFD_RESAMPLE
+ *
+ * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
+ * the irqfd to operate in resampling mode for level triggered interrupt
+ * emulation. See Documentation/virtual/kvm/api.txt.
+ */
+ #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
+
+ /* Argument for KVM_IRQFD */
+ struct kvm_irqfd {
+ __u32 fd; /* eventfd that triggers the interrupt */
+ __u32 gsi; /* interrupt line to raise */
+ __u32 flags; /* KVM_IRQFD_FLAG_* */
+ __u32 resamplefd; /* valid only with KVM_IRQFD_FLAG_RESAMPLE (KVM_CAP_IRQFD_RESAMPLE) */
+ __u8 pad[16]; /* reserved */
+ };
+
+ /* Argument for KVM_SET_CLOCK / KVM_GET_CLOCK */
+ struct kvm_clock_data {
+ __u64 clock; /* guest clock value (nanoseconds per api.txt) */
+ __u32 flags;
+ __u32 pad[9]; /* reserved */
+ };
+
+ #define KVM_MMU_FSL_BOOKE_NOHV 0
+ #define KVM_MMU_FSL_BOOKE_HV 1
+
+ /* Software-managed TLB configuration (KVM_CAP_SW_TLB); mmu_type is one of
+  * the KVM_MMU_FSL_BOOKE_* values above. Presumably passed via
+  * KVM_ENABLE_CAP -- not visible from this header alone. */
+ struct kvm_config_tlb {
+ __u64 params; /* userspace address of mmu-type-specific parameters */
+ __u64 array; /* userspace address of the TLB array */
+ __u32 mmu_type; /* KVM_MMU_FSL_BOOKE_NOHV or KVM_MMU_FSL_BOOKE_HV */
+ __u32 array_len; /* size of the array, in bytes -- TODO confirm units */
+ };
+
+ /* Argument for KVM_DIRTY_TLB (KVM_CAP_SW_TLB) */
+ struct kvm_dirty_tlb {
+ __u64 bitmap; /* userspace address of the dirty-entry bitmap */
+ __u32 num_dirty; /* number of dirty TLB entries */
+ };
+
+ /* Available with KVM_CAP_ONE_REG */
+
+ #define KVM_REG_ARCH_MASK 0xff00000000000000ULL
+ #define KVM_REG_GENERIC 0x0000000000000000ULL
+
+ /*
+ * Architecture specific registers are to be defined in arch headers and
+ * ORed with the arch identifier.
+ */
+ #define KVM_REG_PPC 0x1000000000000000ULL
+ #define KVM_REG_X86 0x2000000000000000ULL
+ #define KVM_REG_IA64 0x3000000000000000ULL
+ #define KVM_REG_ARM 0x4000000000000000ULL
+ #define KVM_REG_S390 0x5000000000000000ULL
+
+ #define KVM_REG_SIZE_SHIFT 52
+ #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
+ #define KVM_REG_SIZE_U8 0x0000000000000000ULL
+ #define KVM_REG_SIZE_U16 0x0010000000000000ULL
+ #define KVM_REG_SIZE_U32 0x0020000000000000ULL
+ #define KVM_REG_SIZE_U64 0x0030000000000000ULL
+ #define KVM_REG_SIZE_U128 0x0040000000000000ULL
+ #define KVM_REG_SIZE_U256 0x0050000000000000ULL
+ #define KVM_REG_SIZE_U512 0x0060000000000000ULL
+ #define KVM_REG_SIZE_U1024 0x0070000000000000ULL
+
+ /* Argument for KVM_GET_ONE_REG / KVM_SET_ONE_REG */
+ struct kvm_one_reg {
+ __u64 id; /* register id: KVM_REG_* arch bits | KVM_REG_SIZE_* | arch register number */
+ __u64 addr; /* userspace address where the value is read/written */
+ };
+
+ /* Argument for KVM_SIGNAL_MSI (KVM_CAP_SIGNAL_MSI): directly inject one MSI */
+ struct kvm_msi {
+ __u32 address_lo; /* low 32 bits of MSI address */
+ __u32 address_hi; /* high 32 bits of MSI address */
+ __u32 data; /* MSI data payload */
+ __u32 flags;
+ __u8 pad[16]; /* reserved */
+ };
+
+ /*
+ * ioctls for VM fds
+ */
+ #define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
+ /*
+ * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
+ * a vcpu fd.
+ */
+ #define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
+ #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
+ /* KVM_SET_MEMORY_ALIAS is obsolete: */
+ #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
+ #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
+ #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
+ #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \
+ struct kvm_userspace_memory_region)
+ #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
+ #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+
+ /* enable ucontrol for s390 */
+ /* Argument for KVM_S390_UCAS_MAP / KVM_S390_UCAS_UNMAP (s390 ucontrol) */
+ struct kvm_s390_ucas_mapping {
+ __u64 user_addr; /* userspace address to (un)map */
+ __u64 vcpu_addr; /* corresponding vcpu address */
+ __u64 length; /* length of the mapping in bytes */
+ };
+ #define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping)
+ #define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping)
+ #define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long)
+
+ /* Device model IOC */
+ #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
+ #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
+ #define KVM_GET_IRQCHIP _IOWR(KVMIO, 0x62, struct kvm_irqchip)
+ #define KVM_SET_IRQCHIP _IOR(KVMIO, 0x63, struct kvm_irqchip)
+ #define KVM_CREATE_PIT _IO(KVMIO, 0x64)
+ #define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
+ #define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
+ #define KVM_IRQ_LINE_STATUS _IOWR(KVMIO, 0x67, struct kvm_irq_level)
+ #define KVM_REGISTER_COALESCED_MMIO \
+ _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
+ #define KVM_UNREGISTER_COALESCED_MMIO \
+ _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
+ #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
+ struct kvm_assigned_pci_dev)
+ #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
+ /* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
+ #define KVM_ASSIGN_IRQ __KVM_DEPRECATED_VM_R_0x70
+ #define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
+ #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
+ #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
+ struct kvm_assigned_pci_dev)
+ #define KVM_ASSIGN_SET_MSIX_NR _IOW(KVMIO, 0x73, \
+ struct kvm_assigned_msix_nr)
+ #define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, \
+ struct kvm_assigned_msix_entry)
+ #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
+ #define KVM_IRQFD _IOW(KVMIO, 0x76, struct kvm_irqfd)
+ #define KVM_CREATE_PIT2 _IOW(KVMIO, 0x77, struct kvm_pit_config)
+ #define KVM_SET_BOOT_CPU_ID _IO(KVMIO, 0x78)
+ #define KVM_IOEVENTFD _IOW(KVMIO, 0x79, struct kvm_ioeventfd)
+ #define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config)
+ #define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data)
+ #define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data)
+ /* Available with KVM_CAP_PIT_STATE2 */
+ #define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2)
+ #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
+ /* Available with KVM_CAP_PPC_GET_PVINFO */
+ #define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
+ /* Available with KVM_CAP_TSC_CONTROL */
+ #define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
+ #define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
+ /* Available with KVM_CAP_PCI_2_3 */
+ #define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
+ struct kvm_assigned_pci_dev)
+ /* Available with KVM_CAP_SIGNAL_MSI */
+ #define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
+ /* Available with KVM_CAP_PPC_GET_SMMU_INFO */
+ #define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
+ /* Available with KVM_CAP_PPC_ALLOC_HTAB */
+ #define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
++#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
++/* Available with KVM_CAP_PPC_RMA */
++#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
+
+ /*
+ * ioctls for vcpu fds
+ */
+ #define KVM_RUN _IO(KVMIO, 0x80)
+ #define KVM_GET_REGS _IOR(KVMIO, 0x81, struct kvm_regs)
+ #define KVM_SET_REGS _IOW(KVMIO, 0x82, struct kvm_regs)
+ #define KVM_GET_SREGS _IOR(KVMIO, 0x83, struct kvm_sregs)
+ #define KVM_SET_SREGS _IOW(KVMIO, 0x84, struct kvm_sregs)
+ #define KVM_TRANSLATE _IOWR(KVMIO, 0x85, struct kvm_translation)
+ #define KVM_INTERRUPT _IOW(KVMIO, 0x86, struct kvm_interrupt)
+ /* KVM_DEBUG_GUEST is no longer supported, use KVM_SET_GUEST_DEBUG instead */
+ #define KVM_DEBUG_GUEST __KVM_DEPRECATED_VCPU_W_0x87
+ #define KVM_GET_MSRS _IOWR(KVMIO, 0x88, struct kvm_msrs)
+ #define KVM_SET_MSRS _IOW(KVMIO, 0x89, struct kvm_msrs)
+ #define KVM_SET_CPUID _IOW(KVMIO, 0x8a, struct kvm_cpuid)
+ #define KVM_SET_SIGNAL_MASK _IOW(KVMIO, 0x8b, struct kvm_signal_mask)
+ #define KVM_GET_FPU _IOR(KVMIO, 0x8c, struct kvm_fpu)
+ #define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu)
+ #define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state)
+ #define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state)
+ #define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2)
+ #define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2)
+ /* Available with KVM_CAP_VAPIC */
+ #define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl)
+ /* Available with KVM_CAP_VAPIC */
+ #define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr)
+ /* valid for virtual machine (for floating interrupt)_and_ vcpu */
+ #define KVM_S390_INTERRUPT _IOW(KVMIO, 0x94, struct kvm_s390_interrupt)
+ /* store status for s390 */
+ #define KVM_S390_STORE_STATUS_NOADDR (-1ul)
+ #define KVM_S390_STORE_STATUS_PREFIXED (-2ul)
+ #define KVM_S390_STORE_STATUS _IOW(KVMIO, 0x95, unsigned long)
+ /* initial ipl psw for s390 */
+ #define KVM_S390_SET_INITIAL_PSW _IOW(KVMIO, 0x96, struct kvm_s390_psw)
+ /* initial reset for s390 */
+ #define KVM_S390_INITIAL_RESET _IO(KVMIO, 0x97)
+ #define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
+ #define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
+ /* Available with KVM_CAP_NMI */
+ #define KVM_NMI _IO(KVMIO, 0x9a)
+ /* Available with KVM_CAP_SET_GUEST_DEBUG */
+ #define KVM_SET_GUEST_DEBUG _IOW(KVMIO, 0x9b, struct kvm_guest_debug)
+ /* MCE for x86 */
+ #define KVM_X86_SETUP_MCE _IOW(KVMIO, 0x9c, __u64)
+ #define KVM_X86_GET_MCE_CAP_SUPPORTED _IOR(KVMIO, 0x9d, __u64)
+ #define KVM_X86_SET_MCE _IOW(KVMIO, 0x9e, struct kvm_x86_mce)
+ /* IA64 stack access */
+ #define KVM_IA64_VCPU_GET_STACK _IOR(KVMIO, 0x9a, void *)
+ #define KVM_IA64_VCPU_SET_STACK _IOW(KVMIO, 0x9b, void *)
+ /* Available with KVM_CAP_VCPU_EVENTS */
+ #define KVM_GET_VCPU_EVENTS _IOR(KVMIO, 0x9f, struct kvm_vcpu_events)
+ #define KVM_SET_VCPU_EVENTS _IOW(KVMIO, 0xa0, struct kvm_vcpu_events)
+ /* Available with KVM_CAP_DEBUGREGS */
+ #define KVM_GET_DEBUGREGS _IOR(KVMIO, 0xa1, struct kvm_debugregs)
+ #define KVM_SET_DEBUGREGS _IOW(KVMIO, 0xa2, struct kvm_debugregs)
+ #define KVM_ENABLE_CAP _IOW(KVMIO, 0xa3, struct kvm_enable_cap)
+ /* Available with KVM_CAP_XSAVE */
+ #define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave)
+ #define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave)
+ /* Available with KVM_CAP_XCRS */
+ #define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
+ #define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
+ /* Available with KVM_CAP_SW_TLB */
+ #define KVM_DIRTY_TLB _IOW(KVMIO, 0xaa, struct kvm_dirty_tlb)
+ /* Available with KVM_CAP_ONE_REG */
+ #define KVM_GET_ONE_REG _IOW(KVMIO, 0xab, struct kvm_one_reg)
+ #define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg)
+ /* VM is being stopped by host */
+ #define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad)
+
+ #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+ #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
+ #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
+
+ /* Argument for KVM_ASSIGN_PCI_DEVICE / KVM_DEASSIGN_PCI_DEVICE / KVM_ASSIGN_SET_INTX_MASK */
+ struct kvm_assigned_pci_dev {
+ __u32 assigned_dev_id; /* userspace-chosen handle identifying the assigned device */
+ __u32 busnr; /* PCI bus number */
+ __u32 devfn; /* PCI device/function number */
+ __u32 flags; /* KVM_DEV_ASSIGN_* */
+ __u32 segnr; /* PCI segment (domain) number */
+ union {
+ __u32 reserved[11]; /* reserved for future use */
+ };
+ };
+
+ #define KVM_DEV_IRQ_HOST_INTX (1 << 0)
+ #define KVM_DEV_IRQ_HOST_MSI (1 << 1)
+ #define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
+
+ #define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
+ #define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
+ #define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
+
+ #define KVM_DEV_IRQ_HOST_MASK 0x00ff
+ #define KVM_DEV_IRQ_GUEST_MASK 0xff00
+
+ /* Argument for KVM_ASSIGN_DEV_IRQ / KVM_DEASSIGN_DEV_IRQ */
+ struct kvm_assigned_irq {
+ __u32 assigned_dev_id; /* handle from kvm_assigned_pci_dev */
+ __u32 host_irq; /* ignored (legacy field) */
+ __u32 guest_irq; /* interrupt number as seen by the guest */
+ __u32 flags; /* KVM_DEV_IRQ_HOST_* | KVM_DEV_IRQ_GUEST_* */
+ union {
+ __u32 reserved[12]; /* reserved for future use */
+ };
+ };
+
+ /* Argument for KVM_ASSIGN_SET_MSIX_NR: set the MSI-X entry count for a device */
+ struct kvm_assigned_msix_nr {
+ __u32 assigned_dev_id; /* handle from kvm_assigned_pci_dev */
+ __u16 entry_nr; /* number of MSI-X table entries */
+ __u16 padding; /* explicit padding */
+ };
+
+ #define KVM_MAX_MSIX_PER_DEV 256
+ /* Argument for KVM_ASSIGN_SET_MSIX_ENTRY: bind one MSI-X table entry to a GSI */
+ struct kvm_assigned_msix_entry {
+ __u32 assigned_dev_id; /* handle from kvm_assigned_pci_dev */
+ __u32 gsi; /* GSI to deliver this entry's interrupt to */
+ __u16 entry; /* The index of entry in the MSI-X table */
+ __u16 padding[3]; /* explicit padding */
+ };
+
+ #endif /* __LINUX_KVM_H */