KVM stub

arch/ia64/kvm/vmm.c is the small "stub" module on the VMM side: it collects the VMM's entry points into a table and registers that table with the main kvm module, and it provides the few kernel-style services (spinlock wrappers, a printk() replacement) that code running in the VMM context needs. The full file, as shown in the LXR cross-reference:

http://os1a.cs.columbia.edu/lxr/source/arch/ia64/kvm/vmm.c?a=x86

/*
 * vmm.c: vmm module interface with kvm module
 *
 * Copyright (c) 2007, Intel Corporation.
 *
 *  Xiantao Zhang ([email protected])
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */


#include<linux/kernel.h>
#include<linux/module.h>
#include<asm/fpswa.h>

#include "vcpu.h"

MODULE_AUTHOR("Intel");
MODULE_LICENSE("GPL");

extern char kvm_ia64_ivt;
extern char kvm_asm_mov_from_ar;
extern char kvm_asm_mov_from_ar_sn2;
extern fpswa_interface_t *vmm_fpswa_interface;

long vmm_sanity = 1;

struct kvm_vmm_info vmm_info = {
        .module                 = THIS_MODULE,
        .vmm_entry              = vmm_entry,
        .tramp_entry            = vmm_trampoline,
        .vmm_ivt                = (unsigned long)&kvm_ia64_ivt,
        .patch_mov_ar           = (unsigned long)&kvm_asm_mov_from_ar,
        .patch_mov_ar_sn2       = (unsigned long)&kvm_asm_mov_from_ar_sn2,
};
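
As an aside, the structure being filled in here is not defined in this file; reconstructed from the initializer (and from the ia64 asm/kvm_host.h of this kernel generation), it looks roughly like the sketch below. kvm_vmm_entry and kvm_tramp_entry are function typedefs declared elsewhere; treat this as a sketch rather than an authoritative copy.

struct kvm_vmm_info {
        struct module   *module;            /* the VMM module itself */
        kvm_vmm_entry   *vmm_entry;         /* entry point into the VMM */
        kvm_tramp_entry *tramp_entry;       /* trampoline used for the world switch */
        unsigned long   vmm_ivt;            /* address of the VMM interruption vector table */
        unsigned long   patch_mov_ar;       /* code patched at runtime for mov-from-AR handling */
        unsigned long   patch_mov_ar_sn2;   /* SGI SN2 variant of the same patch target */
};

Everything the main kvm module needs in order to enter this module's code is handed over through this one table. Back in vmm.c, module initialisation: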

static int __init  kvm_vmm_init(void)
{

        vmm_fpswa_interface = fpswa_interface;

        /*Register vmm data to kvm side*/
        return kvm_init(&vmm_info, 1024, 0, THIS_MODULE);
}
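
kvm_init() in this kernel generation takes an opaque pointer as its first argument and passes it to the architecture hook kvm_arch_init(); on ia64 that hook (in kvm-ia64.c) keeps the table around so the host can later jump into the VMM. Below is a minimal sketch of the receiving side, with the body simplified and the error handling assumed; the real function does more work (hardware capability checks, VMM area setup) that is omitted here.

/* Host side (sketch, not the verbatim kvm-ia64.c code) */
static struct kvm_vmm_info *kvm_vmm_info;

int kvm_arch_init(void *opaque)
{
        struct kvm_vmm_info *info = opaque;

        if (!info || !info->module)
                return -EINVAL;     /* assumed sanity check, for illustration */

        kvm_vmm_info = info;        /* entry points used on every VM entry */
        return 0;
}

The other two arguments to kvm_init() are the vcpu allocation size and alignment, and the module argument lets the core code pin this module while VMs exist. Back in vmm.c, the exit hook and the spinlock wrappers: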

static void __exit kvm_vmm_exit(void)
{
        kvm_exit();
        return ;
}

void vmm_spin_lock(vmm_spinlock_t *lock)
{
        _vmm_raw_spin_lock(lock);
}

void vmm_spin_unlock(vmm_spinlock_t *lock)
{
        _vmm_raw_spin_unlock(lock);
}
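
These two are just exported wrappers around the raw lock primitives declared in vcpu.h, for use by the rest of the VMM code. A purely hypothetical caller might look like this (the lock and function below are made up for illustration, and lock initialisation is omitted):

static vmm_spinlock_t sample_lock;      /* hypothetical, not in the real file */

static void sample_touch_shared_state(void)
{
        vmm_spin_lock(&sample_lock);
        /* ... update state shared between vCPUs while inside the VMM ... */
        vmm_spin_unlock(&sample_lock);
}

Back in vmm.c, the debug exit and the printk() replacement: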

static void vcpu_debug_exit(struct kvm_vcpu *vcpu)
{
        struct exit_ctl_data *p = &vcpu->arch.exit_data;
        long psr;

        local_irq_save(psr);
        p->exit_reason = EXIT_REASON_DEBUG;
        vmm_transition(vcpu);
        local_irq_restore(psr);
}

asmlinkage int printk(const char *fmt, ...)
{
        struct kvm_vcpu *vcpu = current_vcpu;
        va_list args;
        int r;

        memset(vcpu->arch.log_buf, 0, VMM_LOG_LEN);
        va_start(args, fmt);
        r = vsnprintf(vcpu->arch.log_buf, VMM_LOG_LEN, fmt, args);
        va_end(args);
        vcpu_debug_exit(vcpu);
        return r;
}
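
This is the most interesting part of the stub: the VMM runs in its own relocated mapping alongside the guest, so it cannot call the host kernel's real printk(). The replacement above formats the message into the per-vCPU log_buf and then forces an EXIT_REASON_DEBUG world switch (vcpu_debug_exit() -> vmm_transition()) back to the host, which prints the buffer with the real printk(). A sketch of the matching host-side exit handler follows; the name mirrors the handle_vcpu_debug() handler in kvm-ia64.c of the same era, but treat the exact body as an assumption.

/* Host side (sketch): on EXIT_REASON_DEBUG, dump whatever the VMM's
 * printk() replacement left in the per-vCPU log buffer. */
static int handle_vcpu_debug(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        printk("%s", vcpu->arch.log_buf);   /* the host's real printk */
        return 1;                           /* resume the guest */
}

The last two lines of the file just register the init and exit hooks: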

module_init(kvm_vmm_init)
module_exit(kvm_vmm_exit)
