- /*
- * QEMU Xen emulation: Event channel support
- *
- * Copyright © 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Authors: David Woodhouse <dwmw2@infradead.org>
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- */
- #include "qemu/osdep.h"
- #include "qemu/host-utils.h"
- #include "qemu/module.h"
- #include "qemu/lockable.h"
- #include "qemu/main-loop.h"
- #include "qemu/log.h"
- #include "qemu/error-report.h"
- #include "monitor/monitor.h"
- #include "monitor/hmp.h"
- #include "qapi/error.h"
- #include "qapi/qapi-commands-misc-target.h"
- #include "qobject/qdict.h"
- #include "qom/object.h"
- #include "exec/target_page.h"
- #include "exec/address-spaces.h"
- #include "migration/vmstate.h"
- #include "trace.h"
- #include "hw/sysbus.h"
- #include "hw/xen/xen.h"
- #include "hw/i386/x86.h"
- #include "hw/i386/pc.h"
- #include "hw/pci/pci.h"
- #include "hw/pci/msi.h"
- #include "hw/pci/msix.h"
- #include "hw/irq.h"
- #include "hw/xen/xen_backend_ops.h"
- #include "xen_evtchn.h"
- #include "xen_overlay.h"
- #include "xen_xenstore.h"
- #include "system/kvm.h"
- #include "system/kvm_xen.h"
- #include <linux/kvm.h>
- #include <sys/eventfd.h>
- #include "hw/xen/interface/memory.h"
- #include "hw/xen/interface/hvm/params.h"
- /* XX: For kvm_update_msi_routes_all() */
- #include "target/i386/kvm/kvm_i386.h"
- #define TYPE_XEN_EVTCHN "xen-evtchn"
- OBJECT_DECLARE_SIMPLE_TYPE(XenEvtchnState, XEN_EVTCHN)
- typedef struct XenEvtchnPort {
- uint32_t vcpu; /* Xen/ACPI vcpu_id */
- uint16_t type; /* EVTCHNSTAT_xxxx */
- union {
- uint16_t val; /* raw value for serialization etc. */
- uint16_t pirq;
- uint16_t virq;
- struct {
- uint16_t port:15;
- uint16_t to_qemu:1; /* Only two targets; qemu or loopback */
- } interdomain;
- } u;
- } XenEvtchnPort;
- /* 32-bit compatibility definitions, also used natively in 32-bit build */
- struct compat_arch_vcpu_info {
- unsigned int cr2;
- unsigned int pad[5];
- };
- struct compat_vcpu_info {
- uint8_t evtchn_upcall_pending;
- uint8_t evtchn_upcall_mask;
- uint16_t pad;
- uint32_t evtchn_pending_sel;
- struct compat_arch_vcpu_info arch;
- struct vcpu_time_info time;
- }; /* 64 bytes (x86) */
- struct compat_arch_shared_info {
- unsigned int max_pfn;
- unsigned int pfn_to_mfn_frame_list_list;
- unsigned int nmi_reason;
- unsigned int p2m_cr3;
- unsigned int p2m_vaddr;
- unsigned int p2m_generation;
- uint32_t wc_sec_hi;
- };
- struct compat_shared_info {
- struct compat_vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
- uint32_t evtchn_pending[32];
- uint32_t evtchn_mask[32];
- uint32_t wc_version; /* Version counter: see vcpu_time_info_t. */
- uint32_t wc_sec;
- uint32_t wc_nsec;
- struct compat_arch_shared_info arch;
- };
- #define COMPAT_EVTCHN_2L_NR_CHANNELS 1024
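- /*
-  * These limits follow from the shared_info layout: the compat
-  * evtchn_pending[32] array of 32-bit words gives 32 * 32 = 1024 ports,
-  * while the 64-bit layout's 64 words of 64 bits give the native 4096
-  * (EVTCHN_2L_NR_CHANNELS).
-  */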
- /* Local private implementation of struct xenevtchn_handle */
- struct xenevtchn_handle {
- evtchn_port_t be_port;
- evtchn_port_t guest_port; /* Or zero for unbound */
- int fd;
- };
- /*
- * These 'emuirq' values are used by Xen in the live migration (LM) stream... and yes, I am
- * insane enough to think about guest-transparent live migration from actual
- * Xen to QEMU, and ensuring that we can convert/consume the stream.
- */
- #define IRQ_UNBOUND -1
- #define IRQ_PT -2
- #define IRQ_MSI_EMU -3
- struct pirq_info {
- int gsi;
- uint16_t port;
- PCIDevice *dev;
- int vector;
- bool is_msix;
- bool is_masked;
- bool is_translated;
- };
- struct XenEvtchnState {
- /*< private >*/
- SysBusDevice busdev;
- /*< public >*/
- uint64_t callback_param;
- bool evtchn_in_kernel;
- bool setting_callback_gsi;
- int extern_gsi_level;
- uint32_t callback_gsi;
- QEMUBH *gsi_bh;
- QemuMutex port_lock;
- uint32_t nr_ports;
- XenEvtchnPort port_table[EVTCHN_2L_NR_CHANNELS];
- /* Connected to the system GSIs for raising callback as GSI / INTx */
- unsigned int nr_callback_gsis;
- qemu_irq *callback_gsis;
- struct xenevtchn_handle *be_handles[EVTCHN_2L_NR_CHANNELS];
- uint32_t nr_pirqs;
- /* Bitmap of allocated PIRQs (serialized) */
- uint16_t nr_pirq_inuse_words;
- uint64_t *pirq_inuse_bitmap;
- /* GSI → PIRQ mapping (serialized) */
- uint16_t gsi_pirq[IOAPIC_NUM_PINS];
- /* Per-GSI assertion state (serialized) */
- uint32_t pirq_gsi_set;
- /* Per-PIRQ information (rebuilt on migration, protected by BQL) */
- struct pirq_info *pirq;
- };
- #define pirq_inuse_word(s, pirq) (s->pirq_inuse_bitmap[((pirq) / 64)])
- #define pirq_inuse_bit(pirq) (1ULL << ((pirq) & 63))
- #define pirq_inuse(s, pirq) (pirq_inuse_word(s, pirq) & pirq_inuse_bit(pirq))
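- /*
-  * e.g. for PIRQ 70, pirq_inuse_word() selects pirq_inuse_bitmap[1] and
-  * pirq_inuse_bit() yields (1ULL << 6): bit 6 of word 1 tracks that PIRQ.
-  */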
- struct XenEvtchnState *xen_evtchn_singleton;
- /* Top bits of callback_param are the type (HVM_PARAM_CALLBACK_TYPE_xxx) */
- #define CALLBACK_VIA_TYPE_SHIFT 56
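- /*
-  * e.g. a guest requesting GSI 9 delivery sets HVM_PARAM_CALLBACK_IRQ to
-  * ((uint64_t)HVM_PARAM_CALLBACK_TYPE_GSI << CALLBACK_VIA_TYPE_SHIFT) | 9,
-  * which xen_evtchn_set_callback_param() decodes below.
-  */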
- static void unbind_backend_ports(XenEvtchnState *s);
- static int xen_evtchn_pre_load(void *opaque)
- {
- XenEvtchnState *s = opaque;
- /* Unbind all the backend-side ports; they need to rebind */
- unbind_backend_ports(s);
- /* It'll be leaked otherwise. */
- g_free(s->pirq_inuse_bitmap);
- s->pirq_inuse_bitmap = NULL;
- return 0;
- }
- static int xen_evtchn_post_load(void *opaque, int version_id)
- {
- XenEvtchnState *s = opaque;
- uint32_t i;
- if (s->callback_param) {
- xen_evtchn_set_callback_param(s->callback_param);
- }
- /* Rebuild s->pirq[].port mapping */
- for (i = 0; i < s->nr_ports; i++) {
- XenEvtchnPort *p = &s->port_table[i];
- if (p->type == EVTCHNSTAT_pirq) {
- assert(p->u.pirq);
- assert(p->u.pirq < s->nr_pirqs);
- /*
- * Set the gsi to IRQ_UNBOUND; it may be changed to an actual
- * GSI# below, or to IRQ_MSI_EMU when the MSI table snooping
- * catches up with it.
- */
- s->pirq[p->u.pirq].gsi = IRQ_UNBOUND;
- s->pirq[p->u.pirq].port = i;
- }
- }
- /* Rebuild s->pirq[].gsi mapping */
- for (i = 0; i < IOAPIC_NUM_PINS; i++) {
- if (s->gsi_pirq[i]) {
- s->pirq[s->gsi_pirq[i]].gsi = i;
- }
- }
- return 0;
- }
- static bool xen_evtchn_is_needed(void *opaque)
- {
- return xen_mode == XEN_EMULATE;
- }
- static const VMStateDescription xen_evtchn_port_vmstate = {
- .name = "xen_evtchn_port",
- .version_id = 1,
- .minimum_version_id = 1,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT32(vcpu, XenEvtchnPort),
- VMSTATE_UINT16(type, XenEvtchnPort),
- VMSTATE_UINT16(u.val, XenEvtchnPort),
- VMSTATE_END_OF_LIST()
- }
- };
- static const VMStateDescription xen_evtchn_vmstate = {
- .name = "xen_evtchn",
- .version_id = 1,
- .minimum_version_id = 1,
- .needed = xen_evtchn_is_needed,
- .pre_load = xen_evtchn_pre_load,
- .post_load = xen_evtchn_post_load,
- .fields = (const VMStateField[]) {
- VMSTATE_UINT64(callback_param, XenEvtchnState),
- VMSTATE_UINT32(nr_ports, XenEvtchnState),
- VMSTATE_STRUCT_VARRAY_UINT32(port_table, XenEvtchnState, nr_ports, 1,
- xen_evtchn_port_vmstate, XenEvtchnPort),
- VMSTATE_UINT16_ARRAY(gsi_pirq, XenEvtchnState, IOAPIC_NUM_PINS),
- VMSTATE_VARRAY_UINT16_ALLOC(pirq_inuse_bitmap, XenEvtchnState,
- nr_pirq_inuse_words, 0,
- vmstate_info_uint64, uint64_t),
- VMSTATE_UINT32(pirq_gsi_set, XenEvtchnState),
- VMSTATE_END_OF_LIST()
- }
- };
- static void xen_evtchn_class_init(ObjectClass *klass, void *data)
- {
- DeviceClass *dc = DEVICE_CLASS(klass);
- dc->vmsd = &xen_evtchn_vmstate;
- }
- static const TypeInfo xen_evtchn_info = {
- .name = TYPE_XEN_EVTCHN,
- .parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(XenEvtchnState),
- .class_init = xen_evtchn_class_init,
- };
- static struct evtchn_backend_ops emu_evtchn_backend_ops = {
- .open = xen_be_evtchn_open,
- .bind_interdomain = xen_be_evtchn_bind_interdomain,
- .unbind = xen_be_evtchn_unbind,
- .close = xen_be_evtchn_close,
- .get_fd = xen_be_evtchn_fd,
- .notify = xen_be_evtchn_notify,
- .unmask = xen_be_evtchn_unmask,
- .pending = xen_be_evtchn_pending,
- };
- static void gsi_assert_bh(void *opaque)
- {
- struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
- if (vi) {
- xen_evtchn_set_callback_level(!!vi->evtchn_upcall_pending);
- }
- }
- void xen_evtchn_create(unsigned int nr_gsis, qemu_irq *system_gsis)
- {
- XenEvtchnState *s = XEN_EVTCHN(sysbus_create_simple(TYPE_XEN_EVTCHN,
- -1, NULL));
- int i;
- xen_evtchn_singleton = s;
- qemu_mutex_init(&s->port_lock);
- s->gsi_bh = aio_bh_new(qemu_get_aio_context(), gsi_assert_bh, s);
- /*
- * These are the *output* GSI from event channel support, for
- * signalling CPU0's events via GSI or PCI INTx instead of the
- * per-CPU vector. We create a *set* of irqs and connect one to
- * each of the system GSIs which were passed in from the platform
- * code, and then just trigger the right one as appropriate from
- * xen_evtchn_set_callback_level().
- */
- s->nr_callback_gsis = nr_gsis;
- s->callback_gsis = g_new0(qemu_irq, nr_gsis);
- for (i = 0; i < nr_gsis; i++) {
- sysbus_init_irq(SYS_BUS_DEVICE(s), &s->callback_gsis[i]);
- sysbus_connect_irq(SYS_BUS_DEVICE(s), i, system_gsis[i]);
- }
- /*
- * The Xen scheme for encoding PIRQ# into an MSI message is not
- * compatible with 32-bit MSI, as it puts the high bits of the
- * PIRQ# into the high bits of the MSI message address, instead of
- * using the Extended Destination ID in address bits 4-11 which
- * perhaps would have been a better choice.
- *
- * To keep life simple, kvm_accel_instance_init() initialises the
- * default to 256, which conveniently doesn't need to set anything
- * outside the low 32 bits of the address. It can be increased by
- * setting the xen-evtchn-max-pirq property.
- */
- s->nr_pirqs = kvm_xen_get_evtchn_max_pirq();
- s->nr_pirq_inuse_words = DIV_ROUND_UP(s->nr_pirqs, 64);
- s->pirq_inuse_bitmap = g_new0(uint64_t, s->nr_pirq_inuse_words);
- s->pirq = g_new0(struct pirq_info, s->nr_pirqs);
- /* Set event channel functions for backend drivers to use */
- xen_evtchn_ops = &emu_evtchn_backend_ops;
- }
- static void xen_evtchn_register_types(void)
- {
- type_register_static(&xen_evtchn_info);
- }
- type_init(xen_evtchn_register_types)
- static int set_callback_pci_intx(XenEvtchnState *s, uint64_t param)
- {
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
- uint8_t pin = param & 3;
- uint8_t devfn = (param >> 8) & 0xff;
- uint16_t bus = (param >> 16) & 0xffff;
- uint16_t domain = (param >> 32) & 0xffff;
- PCIDevice *pdev;
- PCIINTxRoute r;
- if (domain || !pcms) {
- return 0;
- }
- pdev = pci_find_device(pcms->pcibus, bus, devfn);
- if (!pdev) {
- return 0;
- }
- r = pci_device_route_intx_to_irq(pdev, pin);
- if (r.mode != PCI_INTX_ENABLED) {
- return 0;
- }
- /*
- * Hm, can we be notified of INTX routing changes? Not without
- * *owning* the device and being allowed to overwrite its own
- * ->intx_routing_notifier, AFAICT. So let's not.
- */
- return r.irq;
- }
- void xen_evtchn_set_callback_level(int level)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- if (!s) {
- return;
- }
- /*
- * We get to this function in a number of ways:
- *
- * • From I/O context, via PV backend drivers sending a notification to
- * the guest.
- *
- * • From guest vCPU context, via loopback interdomain event channels
- * (or theoretically even IPIs but guests don't use those with GSI
- * delivery because that's pointless. We don't want a malicious guest
- * to be able to trigger a deadlock though, so we can't rule it out.)
- *
- * • From guest vCPU context when the HVM_PARAM_CALLBACK_IRQ is being
- * configured.
- *
- * • From guest vCPU context in the KVM exit handler, if the upcall
- * pending flag has been cleared and the GSI needs to be deasserted.
- *
- * • Maybe in future, in an interrupt ack/eoi notifier when the GSI has
- * been acked in the irqchip.
- *
- * Whichever context we come from, if we aren't already holding the BQL
- * then we can't take it now, as we may already hold s->port_lock. So
- * trigger the BH to set the IRQ for us instead of doing it immediately.
- *
- * In the HVM_PARAM_CALLBACK_IRQ and KVM exit handler cases, the caller
- * will deliberately take the BQL because they want the change to take
- * effect immediately. That just leaves interdomain loopback as the case
- * which uses the BH.
- */
- if (!bql_locked()) {
- qemu_bh_schedule(s->gsi_bh);
- return;
- }
- if (s->callback_gsi && s->callback_gsi < s->nr_callback_gsis) {
- /*
- * Ugly, but since we hold the BQL we can set this flag so that
- * xen_evtchn_set_gsi() can tell the difference between this code
- * setting the GSI, and an external device (PCI INTx) doing so.
- */
- s->setting_callback_gsi = true;
- /* Do not deassert the line if an external device is asserting it. */
- qemu_set_irq(s->callback_gsis[s->callback_gsi],
- level || s->extern_gsi_level);
- s->setting_callback_gsi = false;
- /*
- * If the callback GSI is the only one asserted, ensure the status
- * is polled for deassertion in kvm_arch_post_run().
- */
- if (level && !s->extern_gsi_level) {
- kvm_xen_set_callback_asserted();
- }
- }
- }
- int xen_evtchn_set_callback_param(uint64_t param)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- struct kvm_xen_hvm_attr xa = {
- .type = KVM_XEN_ATTR_TYPE_UPCALL_VECTOR,
- .u.vector = 0,
- };
- bool in_kernel = false;
- uint32_t gsi = 0;
- int type = param >> CALLBACK_VIA_TYPE_SHIFT;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- /*
- * We need the BQL because set_callback_pci_intx() may call into PCI code,
- * and because we may need to manipulate the old and new GSI levels.
- */
- assert(bql_locked());
- qemu_mutex_lock(&s->port_lock);
- switch (type) {
- case HVM_PARAM_CALLBACK_TYPE_VECTOR: {
- xa.u.vector = (uint8_t)param;
- ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
- if (!ret && kvm_xen_has_cap(EVTCHN_SEND)) {
- in_kernel = true;
- }
- gsi = 0;
- break;
- }
- case HVM_PARAM_CALLBACK_TYPE_PCI_INTX:
- gsi = set_callback_pci_intx(s, param);
- ret = gsi ? 0 : -EINVAL;
- break;
- case HVM_PARAM_CALLBACK_TYPE_GSI:
- gsi = (uint32_t)param;
- ret = 0;
- break;
- default:
- /* Xen doesn't return error even if you set something bogus */
- ret = 0;
- break;
- }
- /* If the guest has set a per-vCPU callback vector, prefer that. */
- if (gsi && kvm_xen_has_vcpu_callback_vector()) {
- in_kernel = kvm_xen_has_cap(EVTCHN_SEND);
- gsi = 0;
- }
- if (!ret) {
- /* If vector delivery was turned *off* then tell the kernel */
- if ((s->callback_param >> CALLBACK_VIA_TYPE_SHIFT) ==
- HVM_PARAM_CALLBACK_TYPE_VECTOR && !xa.u.vector) {
- kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &xa);
- }
- s->callback_param = param;
- s->evtchn_in_kernel = in_kernel;
- if (gsi != s->callback_gsi) {
- struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
- xen_evtchn_set_callback_level(0);
- s->callback_gsi = gsi;
- if (gsi && vi && vi->evtchn_upcall_pending) {
- kvm_xen_inject_vcpu_callback_vector(0, type);
- }
- }
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- static void inject_callback(XenEvtchnState *s, uint32_t vcpu)
- {
- int type = s->callback_param >> CALLBACK_VIA_TYPE_SHIFT;
- kvm_xen_inject_vcpu_callback_vector(vcpu, type);
- }
- static void deassign_kernel_port(evtchn_port_t port)
- {
- struct kvm_xen_hvm_attr ha;
- int ret;
- ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
- ha.u.evtchn.send_port = port;
- ha.u.evtchn.flags = KVM_XEN_EVTCHN_DEASSIGN;
- ret = kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
- if (ret) {
- qemu_log_mask(LOG_GUEST_ERROR, "Failed to unbind kernel port %d: %s\n",
- port, strerror(-ret));
- }
- }
- static int assign_kernel_port(uint16_t type, evtchn_port_t port,
- uint32_t vcpu_id)
- {
- CPUState *cpu = qemu_get_cpu(vcpu_id);
- struct kvm_xen_hvm_attr ha;
- if (!cpu) {
- return -ENOENT;
- }
- ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
- ha.u.evtchn.send_port = port;
- ha.u.evtchn.type = type;
- ha.u.evtchn.flags = 0;
- ha.u.evtchn.deliver.port.port = port;
- ha.u.evtchn.deliver.port.vcpu = kvm_arch_vcpu_id(cpu);
- ha.u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
- return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
- }
- static int assign_kernel_eventfd(uint16_t type, evtchn_port_t port, int fd)
- {
- struct kvm_xen_hvm_attr ha;
- ha.type = KVM_XEN_ATTR_TYPE_EVTCHN;
- ha.u.evtchn.send_port = port;
- ha.u.evtchn.type = type;
- ha.u.evtchn.flags = 0;
- ha.u.evtchn.deliver.eventfd.port = 0;
- ha.u.evtchn.deliver.eventfd.fd = fd;
- return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_SET_ATTR, &ha);
- }
- static bool valid_port(evtchn_port_t port)
- {
- if (!port) {
- return false;
- }
- if (xen_is_long_mode()) {
- return port < EVTCHN_2L_NR_CHANNELS;
- } else {
- return port < COMPAT_EVTCHN_2L_NR_CHANNELS;
- }
- }
- static bool valid_vcpu(uint32_t vcpu)
- {
- return !!qemu_get_cpu(vcpu);
- }
- static void unbind_backend_ports(XenEvtchnState *s)
- {
- XenEvtchnPort *p;
- int i;
- for (i = 1; i < s->nr_ports; i++) {
- p = &s->port_table[i];
- if (p->type == EVTCHNSTAT_interdomain && p->u.interdomain.to_qemu) {
- evtchn_port_t be_port = p->u.interdomain.port;
- if (s->be_handles[be_port]) {
- /* This part will be overwritten on the load anyway. */
- p->type = EVTCHNSTAT_unbound;
- p->u.interdomain.port = 0;
- /* Leave the backend port open and unbound too. */
- if (kvm_xen_has_cap(EVTCHN_SEND)) {
- deassign_kernel_port(i);
- }
- s->be_handles[be_port]->guest_port = 0;
- }
- }
- }
- }
- int xen_evtchn_status_op(struct evtchn_status *status)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- XenEvtchnPort *p;
- if (!s) {
- return -ENOTSUP;
- }
- if (status->dom != DOMID_SELF && status->dom != xen_domid) {
- return -ESRCH;
- }
- if (!valid_port(status->port)) {
- return -EINVAL;
- }
- qemu_mutex_lock(&s->port_lock);
- p = &s->port_table[status->port];
- status->status = p->type;
- status->vcpu = p->vcpu;
- switch (p->type) {
- case EVTCHNSTAT_unbound:
- status->u.unbound.dom = p->u.interdomain.to_qemu ? DOMID_QEMU
- : xen_domid;
- break;
- case EVTCHNSTAT_interdomain:
- status->u.interdomain.dom = p->u.interdomain.to_qemu ? DOMID_QEMU
- : xen_domid;
- status->u.interdomain.port = p->u.interdomain.port;
- break;
- case EVTCHNSTAT_pirq:
- status->u.pirq = p->u.pirq;
- break;
- case EVTCHNSTAT_virq:
- status->u.virq = p->u.virq;
- break;
- }
- qemu_mutex_unlock(&s->port_lock);
- return 0;
- }
- /*
- * Never thought I'd hear myself say this, but C++ templates would be
- * kind of nice here.
- *
- * template<class T> static int do_unmask_port(T *shinfo, ...);
- */
- static int do_unmask_port_lm(XenEvtchnState *s, evtchn_port_t port,
- bool do_unmask, struct shared_info *shinfo,
- struct vcpu_info *vcpu_info)
- {
- const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
- typeof(shinfo->evtchn_pending[0]) mask;
- int idx = port / bits_per_word;
- int offset = port % bits_per_word;
- mask = 1UL << offset;
- if (idx >= bits_per_word) {
- return -EINVAL;
- }
- if (do_unmask) {
- /*
- * If this is a true unmask operation, clear the mask bit. If
- * it was already unmasked, we have nothing further to do.
- */
- if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
- return 0;
- }
- } else {
- /*
- * This is a pseudo-unmask for affinity changes. We don't
- * change the mask bit, and if it's *masked* we have nothing
- * else to do.
- */
- if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
- return 0;
- }
- }
- /* If the event was not pending, we're done. */
- if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
- return 0;
- }
- /* Now on to the vcpu_info evtchn_pending_sel index... */
- mask = 1UL << idx;
- /* If a port in this word was already pending for this vCPU, all done. */
- if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
- return 0;
- }
- /* Set evtchn_upcall_pending for this vCPU */
- if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
- return 0;
- }
- inject_callback(s, s->port_table[port].vcpu);
- return 0;
- }
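- /*
-  * Worked example of the 2-level chain above, for a 64-bit guest and
-  * port 67: idx = 67 / 64 = 1 and offset = 67 % 64 = 3, so the event is
-  * bit 3 of shinfo->evtchn_pending[1]. Once that bit is set and unmasked,
-  * bit 1 (the word index) is set in vcpu_info->evtchn_pending_sel, then
-  * evtchn_upcall_pending is set and the callback is injected.
-  */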
- static int do_unmask_port_compat(XenEvtchnState *s, evtchn_port_t port,
- bool do_unmask,
- struct compat_shared_info *shinfo,
- struct compat_vcpu_info *vcpu_info)
- {
- const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
- typeof(shinfo->evtchn_pending[0]) mask;
- int idx = port / bits_per_word;
- int offset = port % bits_per_word;
- mask = 1UL << offset;
- if (idx >= bits_per_word) {
- return -EINVAL;
- }
- if (do_unmask) {
- /*
- * If this is a true unmask operation, clear the mask bit. If
- * it was already unmasked, we have nothing further to do.
- */
- if (!((qatomic_fetch_and(&shinfo->evtchn_mask[idx], ~mask) & mask))) {
- return 0;
- }
- } else {
- /*
- * This is a pseudo-unmask for affinity changes. We don't
- * change the mask bit, and if it's *masked* we have nothing
- * else to do.
- */
- if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
- return 0;
- }
- }
- /* If the event was not pending, we're done. */
- if (!(qatomic_fetch_or(&shinfo->evtchn_pending[idx], 0) & mask)) {
- return 0;
- }
- /* Now on to the vcpu_info evtchn_pending_sel index... */
- mask = 1UL << idx;
- /* If a port in this word was already pending for this vCPU, all done. */
- if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
- return 0;
- }
- /* Set evtchn_upcall_pending for this vCPU */
- if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
- return 0;
- }
- inject_callback(s, s->port_table[port].vcpu);
- return 0;
- }
- static int unmask_port(XenEvtchnState *s, evtchn_port_t port, bool do_unmask)
- {
- void *vcpu_info, *shinfo;
- if (s->port_table[port].type == EVTCHNSTAT_closed) {
- return -EINVAL;
- }
- shinfo = xen_overlay_get_shinfo_ptr();
- if (!shinfo) {
- return -ENOTSUP;
- }
- vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
- if (!vcpu_info) {
- return -EINVAL;
- }
- if (xen_is_long_mode()) {
- return do_unmask_port_lm(s, port, do_unmask, shinfo, vcpu_info);
- } else {
- return do_unmask_port_compat(s, port, do_unmask, shinfo, vcpu_info);
- }
- }
- static int do_set_port_lm(XenEvtchnState *s, evtchn_port_t port,
- struct shared_info *shinfo,
- struct vcpu_info *vcpu_info)
- {
- const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
- typeof(shinfo->evtchn_pending[0]) mask;
- int idx = port / bits_per_word;
- int offset = port % bits_per_word;
- mask = 1UL << offset;
- if (idx >= bits_per_word) {
- return -EINVAL;
- }
- /* Update the pending bit itself. If it was already set, we're done. */
- if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
- return 0;
- }
- /* Check if it's masked. */
- if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
- return 0;
- }
- /* Now on to the vcpu_info evtchn_pending_sel index... */
- mask = 1UL << idx;
- /* If a port in this word was already pending for this vCPU, all done. */
- if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
- return 0;
- }
- /* Set evtchn_upcall_pending for this vCPU */
- if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
- return 0;
- }
- inject_callback(s, s->port_table[port].vcpu);
- return 0;
- }
- static int do_set_port_compat(XenEvtchnState *s, evtchn_port_t port,
- struct compat_shared_info *shinfo,
- struct compat_vcpu_info *vcpu_info)
- {
- const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
- typeof(shinfo->evtchn_pending[0]) mask;
- int idx = port / bits_per_word;
- int offset = port % bits_per_word;
- mask = 1UL << offset;
- if (idx >= bits_per_word) {
- return -EINVAL;
- }
- /* Update the pending bit itself. If it was already set, we're done. */
- if (qatomic_fetch_or(&shinfo->evtchn_pending[idx], mask) & mask) {
- return 0;
- }
- /* Check if it's masked. */
- if (qatomic_fetch_or(&shinfo->evtchn_mask[idx], 0) & mask) {
- return 0;
- }
- /* Now on to the vcpu_info evtchn_pending_sel index... */
- mask = 1UL << idx;
- /* If a port in this word was already pending for this vCPU, all done. */
- if (qatomic_fetch_or(&vcpu_info->evtchn_pending_sel, mask) & mask) {
- return 0;
- }
- /* Set evtchn_upcall_pending for this vCPU */
- if (qatomic_fetch_or(&vcpu_info->evtchn_upcall_pending, 1)) {
- return 0;
- }
- inject_callback(s, s->port_table[port].vcpu);
- return 0;
- }
- static int set_port_pending(XenEvtchnState *s, evtchn_port_t port)
- {
- void *vcpu_info, *shinfo;
- if (s->port_table[port].type == EVTCHNSTAT_closed) {
- return -EINVAL;
- }
- if (s->evtchn_in_kernel) {
- XenEvtchnPort *p = &s->port_table[port];
- CPUState *cpu = qemu_get_cpu(p->vcpu);
- struct kvm_irq_routing_xen_evtchn evt;
- if (!cpu) {
- return 0;
- }
- evt.port = port;
- evt.vcpu = kvm_arch_vcpu_id(cpu);
- evt.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
- return kvm_vm_ioctl(kvm_state, KVM_XEN_HVM_EVTCHN_SEND, &evt);
- }
- shinfo = xen_overlay_get_shinfo_ptr();
- if (!shinfo) {
- return -ENOTSUP;
- }
- vcpu_info = kvm_xen_get_vcpu_info_hva(s->port_table[port].vcpu);
- if (!vcpu_info) {
- return -EINVAL;
- }
- if (xen_is_long_mode()) {
- return do_set_port_lm(s, port, shinfo, vcpu_info);
- } else {
- return do_set_port_compat(s, port, shinfo, vcpu_info);
- }
- }
- static int clear_port_pending(XenEvtchnState *s, evtchn_port_t port)
- {
- void *p = xen_overlay_get_shinfo_ptr();
- if (!p) {
- return -ENOTSUP;
- }
- if (xen_is_long_mode()) {
- struct shared_info *shinfo = p;
- const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
- typeof(shinfo->evtchn_pending[0]) mask;
- int idx = port / bits_per_word;
- int offset = port % bits_per_word;
- mask = 1UL << offset;
- qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask);
- } else {
- struct compat_shared_info *shinfo = p;
- const int bits_per_word = BITS_PER_BYTE * sizeof(shinfo->evtchn_pending[0]);
- typeof(shinfo->evtchn_pending[0]) mask;
- int idx = port / bits_per_word;
- int offset = port % bits_per_word;
- mask = 1UL << offset;
- qatomic_fetch_and(&shinfo->evtchn_pending[idx], ~mask);
- }
- return 0;
- }
- static void free_port(XenEvtchnState *s, evtchn_port_t port)
- {
- s->port_table[port].type = EVTCHNSTAT_closed;
- s->port_table[port].u.val = 0;
- s->port_table[port].vcpu = 0;
- if (s->nr_ports == port + 1) {
- do {
- s->nr_ports--;
- } while (s->nr_ports &&
- s->port_table[s->nr_ports - 1].type == EVTCHNSTAT_closed);
- }
- /* Clear pending event to avoid unexpected behavior on re-bind. */
- clear_port_pending(s, port);
- }
- static int allocate_port(XenEvtchnState *s, uint32_t vcpu, uint16_t type,
- uint16_t val, evtchn_port_t *port)
- {
- evtchn_port_t p = 1;
- for (p = 1; valid_port(p); p++) {
- if (s->port_table[p].type == EVTCHNSTAT_closed) {
- s->port_table[p].vcpu = vcpu;
- s->port_table[p].type = type;
- s->port_table[p].u.val = val;
- *port = p;
- if (s->nr_ports < p + 1) {
- s->nr_ports = p + 1;
- }
- return 0;
- }
- }
- return -ENOSPC;
- }
- static bool virq_is_global(uint32_t virq)
- {
- switch (virq) {
- case VIRQ_TIMER:
- case VIRQ_DEBUG:
- case VIRQ_XENOPROF:
- case VIRQ_XENPMU:
- return false;
- default:
- return true;
- }
- }
- static int close_port(XenEvtchnState *s, evtchn_port_t port,
- bool *flush_kvm_routes)
- {
- XenEvtchnPort *p = &s->port_table[port];
- /* Because it *might* be a PIRQ port */
- assert(bql_locked());
- switch (p->type) {
- case EVTCHNSTAT_closed:
- return -ENOENT;
- case EVTCHNSTAT_pirq:
- s->pirq[p->u.pirq].port = 0;
- if (s->pirq[p->u.pirq].is_translated) {
- *flush_kvm_routes = true;
- }
- break;
- case EVTCHNSTAT_virq:
- kvm_xen_set_vcpu_virq(virq_is_global(p->u.virq) ? 0 : p->vcpu,
- p->u.virq, 0);
- break;
- case EVTCHNSTAT_ipi:
- if (s->evtchn_in_kernel) {
- deassign_kernel_port(port);
- }
- break;
- case EVTCHNSTAT_interdomain:
- if (p->u.interdomain.to_qemu) {
- uint16_t be_port = p->u.interdomain.port;
- struct xenevtchn_handle *xc = s->be_handles[be_port];
- if (xc) {
- if (kvm_xen_has_cap(EVTCHN_SEND)) {
- deassign_kernel_port(port);
- }
- xc->guest_port = 0;
- }
- } else {
- /* Loopback interdomain */
- XenEvtchnPort *rp = &s->port_table[p->u.interdomain.port];
- if (!valid_port(p->u.interdomain.port) ||
- rp->u.interdomain.port != port ||
- rp->type != EVTCHNSTAT_interdomain) {
- error_report("Inconsistent state for interdomain unbind");
- } else {
- /* Set the other end back to unbound */
- rp->type = EVTCHNSTAT_unbound;
- rp->u.interdomain.port = 0;
- }
- }
- break;
- default:
- break;
- }
- free_port(s, port);
- return 0;
- }
- int xen_evtchn_soft_reset(void)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- bool flush_kvm_routes = false;
- int i;
- if (!s) {
- return -ENOTSUP;
- }
- assert(bql_locked());
- qemu_mutex_lock(&s->port_lock);
- for (i = 0; i < s->nr_ports; i++) {
- close_port(s, i, &flush_kvm_routes);
- }
- qemu_mutex_unlock(&s->port_lock);
- if (flush_kvm_routes) {
- kvm_update_msi_routes_all(NULL, true, 0, 0);
- }
- return 0;
- }
- int xen_evtchn_reset_op(struct evtchn_reset *reset)
- {
- if (reset->dom != DOMID_SELF && reset->dom != xen_domid) {
- return -ESRCH;
- }
- BQL_LOCK_GUARD();
- return xen_evtchn_soft_reset();
- }
- int xen_evtchn_close_op(struct evtchn_close *close)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- bool flush_kvm_routes = false;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (!valid_port(close->port)) {
- return -EINVAL;
- }
- BQL_LOCK_GUARD();
- qemu_mutex_lock(&s->port_lock);
- ret = close_port(s, close->port, &flush_kvm_routes);
- qemu_mutex_unlock(&s->port_lock);
- if (flush_kvm_routes) {
- kvm_update_msi_routes_all(NULL, true, 0, 0);
- }
- return ret;
- }
- int xen_evtchn_unmask_op(struct evtchn_unmask *unmask)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (!valid_port(unmask->port)) {
- return -EINVAL;
- }
- qemu_mutex_lock(&s->port_lock);
- ret = unmask_port(s, unmask->port, true);
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_evtchn_bind_vcpu_op(struct evtchn_bind_vcpu *vcpu)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- XenEvtchnPort *p;
- int ret = -EINVAL;
- if (!s) {
- return -ENOTSUP;
- }
- if (!valid_port(vcpu->port)) {
- return -EINVAL;
- }
- if (!valid_vcpu(vcpu->vcpu)) {
- return -ENOENT;
- }
- qemu_mutex_lock(&s->port_lock);
- p = &s->port_table[vcpu->port];
- if (p->type == EVTCHNSTAT_interdomain ||
- p->type == EVTCHNSTAT_unbound ||
- p->type == EVTCHNSTAT_pirq ||
- (p->type == EVTCHNSTAT_virq && virq_is_global(p->u.virq))) {
- /*
- * unmask_port() with do_unmask==false will just raise the event
- * on the new vCPU if the port was already pending.
- */
- p->vcpu = vcpu->vcpu;
- unmask_port(s, vcpu->port, false);
- ret = 0;
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_evtchn_bind_virq_op(struct evtchn_bind_virq *virq)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (virq->virq >= NR_VIRQS) {
- return -EINVAL;
- }
- /* Global VIRQ must be allocated on vCPU0 first */
- if (virq_is_global(virq->virq) && virq->vcpu != 0) {
- return -EINVAL;
- }
- if (!valid_vcpu(virq->vcpu)) {
- return -ENOENT;
- }
- qemu_mutex_lock(&s->port_lock);
- ret = allocate_port(s, virq->vcpu, EVTCHNSTAT_virq, virq->virq,
- &virq->port);
- if (!ret) {
- ret = kvm_xen_set_vcpu_virq(virq->vcpu, virq->virq, virq->port);
- if (ret) {
- free_port(s, virq->port);
- }
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_evtchn_bind_pirq_op(struct evtchn_bind_pirq *pirq)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (pirq->pirq >= s->nr_pirqs) {
- return -EINVAL;
- }
- BQL_LOCK_GUARD();
- if (s->pirq[pirq->pirq].port) {
- return -EBUSY;
- }
- qemu_mutex_lock(&s->port_lock);
- ret = allocate_port(s, 0, EVTCHNSTAT_pirq, pirq->pirq,
- &pirq->port);
- if (ret) {
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- s->pirq[pirq->pirq].port = pirq->port;
- trace_kvm_xen_bind_pirq(pirq->pirq, pirq->port);
- qemu_mutex_unlock(&s->port_lock);
- /*
- * Need to do the unmask outside port_lock because it may call
- * back into the MSI translate function.
- */
- if (s->pirq[pirq->pirq].gsi == IRQ_MSI_EMU) {
- if (s->pirq[pirq->pirq].is_masked) {
- PCIDevice *dev = s->pirq[pirq->pirq].dev;
- int vector = s->pirq[pirq->pirq].vector;
- char *dev_path = qdev_get_dev_path(DEVICE(dev));
- trace_kvm_xen_unmask_pirq(pirq->pirq, dev_path, vector);
- g_free(dev_path);
- if (s->pirq[pirq->pirq].is_msix) {
- msix_set_mask(dev, vector, false);
- } else {
- msi_set_mask(dev, vector, false, NULL);
- }
- } else if (s->pirq[pirq->pirq].is_translated) {
- /*
- * If KVM had attempted to translate this one before, make it try
- * again. If we unmasked, then the notifier on the MSI(-X) vector
- * will already have had the same effect.
- */
- kvm_update_msi_routes_all(NULL, true, 0, 0);
- }
- }
- return ret;
- }
- int xen_evtchn_bind_ipi_op(struct evtchn_bind_ipi *ipi)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (!valid_vcpu(ipi->vcpu)) {
- return -ENOENT;
- }
- qemu_mutex_lock(&s->port_lock);
- ret = allocate_port(s, ipi->vcpu, EVTCHNSTAT_ipi, 0, &ipi->port);
- if (!ret && s->evtchn_in_kernel) {
- assign_kernel_port(EVTCHNSTAT_ipi, ipi->port, ipi->vcpu);
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_evtchn_bind_interdomain_op(struct evtchn_bind_interdomain *interdomain)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (interdomain->remote_dom != DOMID_QEMU &&
- interdomain->remote_dom != DOMID_SELF &&
- interdomain->remote_dom != xen_domid) {
- return -ESRCH;
- }
- if (!valid_port(interdomain->remote_port)) {
- return -EINVAL;
- }
- qemu_mutex_lock(&s->port_lock);
- /* The newly allocated port starts out as unbound */
- ret = allocate_port(s, 0, EVTCHNSTAT_unbound, 0, &interdomain->local_port);
- if (ret) {
- goto out;
- }
- if (interdomain->remote_dom == DOMID_QEMU) {
- struct xenevtchn_handle *xc = s->be_handles[interdomain->remote_port];
- XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
- if (!xc) {
- ret = -ENOENT;
- goto out_free_port;
- }
- if (xc->guest_port) {
- ret = -EBUSY;
- goto out_free_port;
- }
- assert(xc->be_port == interdomain->remote_port);
- xc->guest_port = interdomain->local_port;
- if (kvm_xen_has_cap(EVTCHN_SEND)) {
- assign_kernel_eventfd(lp->type, xc->guest_port, xc->fd);
- }
- lp->type = EVTCHNSTAT_interdomain;
- lp->u.interdomain.to_qemu = 1;
- lp->u.interdomain.port = interdomain->remote_port;
- ret = 0;
- } else {
- /* Loopback */
- XenEvtchnPort *rp = &s->port_table[interdomain->remote_port];
- XenEvtchnPort *lp = &s->port_table[interdomain->local_port];
- /*
- * The 'remote' port for loopback must be an unbound port allocated
- * for communication with the local domain, and must *not* be the
- * port that was just allocated for the local end.
- */
- if (interdomain->local_port != interdomain->remote_port &&
- rp->type == EVTCHNSTAT_unbound && !rp->u.interdomain.to_qemu) {
- rp->type = EVTCHNSTAT_interdomain;
- rp->u.interdomain.port = interdomain->local_port;
- lp->type = EVTCHNSTAT_interdomain;
- lp->u.interdomain.port = interdomain->remote_port;
- } else {
- ret = -EINVAL;
- }
- }
- out_free_port:
- if (ret) {
- free_port(s, interdomain->local_port);
- }
- out:
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_evtchn_alloc_unbound_op(struct evtchn_alloc_unbound *alloc)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (alloc->dom != DOMID_SELF && alloc->dom != xen_domid) {
- return -ESRCH;
- }
- if (alloc->remote_dom != DOMID_QEMU &&
- alloc->remote_dom != DOMID_SELF &&
- alloc->remote_dom != xen_domid) {
- return -EPERM;
- }
- qemu_mutex_lock(&s->port_lock);
- ret = allocate_port(s, 0, EVTCHNSTAT_unbound, 0, &alloc->port);
- if (!ret && alloc->remote_dom == DOMID_QEMU) {
- XenEvtchnPort *p = &s->port_table[alloc->port];
- p->u.interdomain.to_qemu = 1;
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_evtchn_send_op(struct evtchn_send *send)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- XenEvtchnPort *p;
- int ret = 0;
- if (!s) {
- return -ENOTSUP;
- }
- if (!valid_port(send->port)) {
- return -EINVAL;
- }
- qemu_mutex_lock(&s->port_lock);
- p = &s->port_table[send->port];
- switch (p->type) {
- case EVTCHNSTAT_interdomain:
- if (p->u.interdomain.to_qemu) {
- /*
- * This is an event from the guest to qemu itself, which is
- * serving as the driver domain.
- */
- uint16_t be_port = p->u.interdomain.port;
- struct xenevtchn_handle *xc = s->be_handles[be_port];
- if (xc) {
- eventfd_write(xc->fd, 1);
- ret = 0;
- } else {
- ret = -ENOENT;
- }
- } else {
- /* Loopback interdomain ports; just a complex IPI */
- set_port_pending(s, p->u.interdomain.port);
- }
- break;
- case EVTCHNSTAT_ipi:
- set_port_pending(s, send->port);
- break;
- case EVTCHNSTAT_unbound:
- /* Xen will silently drop these */
- break;
- default:
- ret = -EINVAL;
- break;
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_evtchn_set_port(uint16_t port)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- XenEvtchnPort *p;
- int ret = -EINVAL;
- if (!s) {
- return -ENOTSUP;
- }
- if (!valid_port(port)) {
- return -EINVAL;
- }
- qemu_mutex_lock(&s->port_lock);
- p = &s->port_table[port];
- /* QEMU has no business sending to anything but these */
- if (p->type == EVTCHNSTAT_virq ||
- (p->type == EVTCHNSTAT_interdomain && p->u.interdomain.to_qemu)) {
- set_port_pending(s, port);
- ret = 0;
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- static int allocate_pirq(XenEvtchnState *s, int type, int gsi)
- {
- uint16_t pirq;
- /*
- * Preserve the allocation strategy that Xen has. It looks like
- * we *never* give out PIRQ 0-15, we give out 16-nr_irqs_gsi only
- * to GSIs (counting up from 16), and then we count backwards from
- * the top for MSIs or when the GSI space is exhausted.
- */
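- /*
-  * e.g. with the default nr_pirqs of 256: the first MAP_PIRQ_TYPE_GSI
-  * allocation returns PIRQ 16, and the first MSI allocation searches down
-  * from 255.
-  */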
- if (type == MAP_PIRQ_TYPE_GSI) {
- for (pirq = 16 ; pirq < IOAPIC_NUM_PINS; pirq++) {
- if (pirq_inuse(s, pirq)) {
- continue;
- }
- /* Found it */
- goto found;
- }
- }
- for (pirq = s->nr_pirqs - 1; pirq >= IOAPIC_NUM_PINS; pirq--) {
- /* Skip whole words at a time when they're full */
- if (pirq_inuse_word(s, pirq) == UINT64_MAX) {
- pirq &= ~63ULL;
- continue;
- }
- if (pirq_inuse(s, pirq)) {
- continue;
- }
- goto found;
- }
- return -ENOSPC;
- found:
- pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
- if (gsi >= 0) {
- assert(gsi < IOAPIC_NUM_PINS);
- s->gsi_pirq[gsi] = pirq;
- }
- s->pirq[pirq].gsi = gsi;
- return pirq;
- }
- bool xen_evtchn_set_gsi(int gsi, int *level)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int pirq;
- assert(bql_locked());
- if (!s || gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
- return false;
- }
- /*
- * For the callback_gsi we need to implement a logical OR of the event
- * channel GSI and the external input (e.g. from PCI INTx), because
- * QEMU itself doesn't support shared level interrupts via demux or
- * resamplers.
- */
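- /*
-  * e.g. if a PCI device asserts INTx on the callback GSI while the event
-  * channel upcall is cleared, the line must stay asserted until the device
-  * deasserts it too; the extern_gsi_level latch below implements that OR.
-  */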
- if (gsi && gsi == s->callback_gsi) {
- /* Remember the external state of the GSI pin (e.g. from PCI INTx) */
- if (!s->setting_callback_gsi) {
- s->extern_gsi_level = *level;
- /*
- * Don't allow the external device to deassert the line if the
- * event channel GSI should still be asserted.
- */
- if (!s->extern_gsi_level) {
- struct vcpu_info *vi = kvm_xen_get_vcpu_info_hva(0);
- if (vi && vi->evtchn_upcall_pending) {
- /* Need to poll for deassertion */
- kvm_xen_set_callback_asserted();
- *level = 1;
- }
- }
- }
- /*
- * The event channel GSI cannot be routed to PIRQ, as that would make
- * no sense. It could also deadlock on s->port_lock, if we proceed.
- * So bail out now.
- */
- return false;
- }
- QEMU_LOCK_GUARD(&s->port_lock);
- pirq = s->gsi_pirq[gsi];
- if (!pirq) {
- return false;
- }
- if (*level) {
- int port = s->pirq[pirq].port;
- s->pirq_gsi_set |= (1U << gsi);
- if (port) {
- set_port_pending(s, port);
- }
- } else {
- s->pirq_gsi_set &= ~(1U << gsi);
- }
- return true;
- }
- static uint32_t msi_pirq_target(uint64_t addr, uint32_t data)
- {
- /* The vector (in low 8 bits of data) must be zero */
- if (data & 0xff) {
- return 0;
- }
- uint32_t pirq = (addr & 0xff000) >> 12;
- pirq |= (addr >> 32) << 8;
- return pirq;
- }
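- /*
-  * e.g. PIRQ 0x142 is signalled with vector 0 and address
-  * (0x1ULL << 32) | (0x42 << 12): the decode above yields
-  * ((addr & 0xff000) >> 12) = 0x42, ORed with ((addr >> 32) << 8) = 0x100,
-  * recovering 0x142.
-  */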
- static void do_remove_pci_vector(XenEvtchnState *s, PCIDevice *dev, int vector,
- int except_pirq)
- {
- uint32_t pirq;
- for (pirq = 0; pirq < s->nr_pirqs; pirq++) {
- /*
- * We could be cleverer here, but it isn't really a fast path, and
- * this trivial optimisation is enough to let us skip the big gap
- * in the middle a bit quicker (in terms of both loop iterations,
- * and cache lines).
- */
- if (!(pirq & 63) && !(pirq_inuse_word(s, pirq))) {
- pirq += 63; /* the loop increment advances to the next word */
- continue;
- }
- if (except_pirq && pirq == except_pirq) {
- continue;
- }
- if (s->pirq[pirq].dev != dev) {
- continue;
- }
- if (vector != -1 && s->pirq[pirq].vector != vector) {
- continue;
- }
- /* It could theoretically be bound to a port already, but that is OK. */
- s->pirq[pirq].dev = dev;
- s->pirq[pirq].gsi = IRQ_UNBOUND;
- s->pirq[pirq].is_msix = false;
- s->pirq[pirq].vector = 0;
- s->pirq[pirq].is_masked = false;
- s->pirq[pirq].is_translated = false;
- }
- }
- void xen_evtchn_remove_pci_device(PCIDevice *dev)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- if (!s) {
- return;
- }
- QEMU_LOCK_GUARD(&s->port_lock);
- do_remove_pci_vector(s, dev, -1, 0);
- }
- void xen_evtchn_snoop_msi(PCIDevice *dev, bool is_msix, unsigned int vector,
- uint64_t addr, uint32_t data, bool is_masked)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- uint32_t pirq;
- if (!s) {
- return;
- }
- assert(bql_locked());
- pirq = msi_pirq_target(addr, data);
- /*
- * The PIRQ# must be sane, and there must be an allocated PIRQ in
- * IRQ_UNBOUND or IRQ_MSI_EMU state to match it.
- */
- if (!pirq || pirq >= s->nr_pirqs || !pirq_inuse(s, pirq) ||
- (s->pirq[pirq].gsi != IRQ_UNBOUND &&
- s->pirq[pirq].gsi != IRQ_MSI_EMU)) {
- pirq = 0;
- }
- if (pirq) {
- s->pirq[pirq].dev = dev;
- s->pirq[pirq].gsi = IRQ_MSI_EMU;
- s->pirq[pirq].is_msix = is_msix;
- s->pirq[pirq].vector = vector;
- s->pirq[pirq].is_masked = is_masked;
- }
- /* Remove any (other) entries for this {device, vector} */
- do_remove_pci_vector(s, dev, vector, pirq);
- }
- int xen_evtchn_translate_pirq_msi(struct kvm_irq_routing_entry *route,
- uint64_t address, uint32_t data)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- uint32_t pirq, port;
- CPUState *cpu;
- if (!s) {
- return 1; /* Not a PIRQ */
- }
- assert(bql_locked());
- pirq = msi_pirq_target(address, data);
- if (!pirq || pirq >= s->nr_pirqs) {
- return 1; /* Not a PIRQ */
- }
- if (!kvm_xen_has_cap(EVTCHN_2LEVEL)) {
- return -ENOTSUP;
- }
- if (s->pirq[pirq].gsi != IRQ_MSI_EMU) {
- return -EINVAL;
- }
- /* Remember that KVM tried to translate this. It might need to try again. */
- s->pirq[pirq].is_translated = true;
- QEMU_LOCK_GUARD(&s->port_lock);
- port = s->pirq[pirq].port;
- if (!valid_port(port)) {
- return -EINVAL;
- }
- cpu = qemu_get_cpu(s->port_table[port].vcpu);
- if (!cpu) {
- return -EINVAL;
- }
- route->type = KVM_IRQ_ROUTING_XEN_EVTCHN;
- route->u.xen_evtchn.port = port;
- route->u.xen_evtchn.vcpu = kvm_arch_vcpu_id(cpu);
- route->u.xen_evtchn.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
- return 0; /* Handled */
- }
- bool xen_evtchn_deliver_pirq_msi(uint64_t address, uint32_t data)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- uint32_t pirq, port;
- if (!s) {
- return false;
- }
- assert(bql_locked());
- pirq = msi_pirq_target(address, data);
- if (!pirq || pirq >= s->nr_pirqs) {
- return false;
- }
- QEMU_LOCK_GUARD(&s->port_lock);
- port = s->pirq[pirq].port;
- if (!valid_port(port)) {
- return false;
- }
- set_port_pending(s, port);
- return true;
- }
- int xen_physdev_map_pirq(struct physdev_map_pirq *map)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int pirq = map->pirq;
- int gsi = map->index;
- if (!s) {
- return -ENOTSUP;
- }
- BQL_LOCK_GUARD();
- QEMU_LOCK_GUARD(&s->port_lock);
- if (map->domid != DOMID_SELF && map->domid != xen_domid) {
- return -EPERM;
- }
- if (map->type != MAP_PIRQ_TYPE_GSI) {
- return -EINVAL;
- }
- if (gsi < 0 || gsi >= IOAPIC_NUM_PINS) {
- return -EINVAL;
- }
- if (pirq < 0) {
- pirq = allocate_pirq(s, map->type, gsi);
- if (pirq < 0) {
- return pirq;
- }
- map->pirq = pirq;
- } else if (pirq >= s->nr_pirqs) {
- return -EINVAL;
- } else {
- /*
- * User specified a valid-looking PIRQ#. Allow it if it is
- * allocated and not yet bound, or if it is unallocated
- */
- if (pirq_inuse(s, pirq)) {
- if (s->pirq[pirq].gsi != IRQ_UNBOUND) {
- return -EBUSY;
- }
- } else {
- /* If it was unused, mark it used now. */
- pirq_inuse_word(s, pirq) |= pirq_inuse_bit(pirq);
- }
- /* Set the mapping in both directions. */
- s->pirq[pirq].gsi = gsi;
- s->gsi_pirq[gsi] = pirq;
- }
- trace_kvm_xen_map_pirq(pirq, gsi);
- return 0;
- }
- int xen_physdev_unmap_pirq(struct physdev_unmap_pirq *unmap)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int pirq = unmap->pirq;
- int gsi;
- if (!s) {
- return -ENOTSUP;
- }
- if (unmap->domid != DOMID_SELF && unmap->domid != xen_domid) {
- return -EPERM;
- }
- if (pirq < 0 || pirq >= s->nr_pirqs) {
- return -EINVAL;
- }
- BQL_LOCK_GUARD();
- qemu_mutex_lock(&s->port_lock);
- if (!pirq_inuse(s, pirq)) {
- qemu_mutex_unlock(&s->port_lock);
- return -ENOENT;
- }
- gsi = s->pirq[pirq].gsi;
- /* We can only unmap GSI PIRQs */
- if (gsi < 0) {
- qemu_mutex_unlock(&s->port_lock);
- return -EINVAL;
- }
- s->gsi_pirq[gsi] = 0;
- s->pirq[pirq].gsi = IRQ_UNBOUND; /* Doesn't actually matter because: */
- pirq_inuse_word(s, pirq) &= ~pirq_inuse_bit(pirq);
- trace_kvm_xen_unmap_pirq(pirq, gsi);
- qemu_mutex_unlock(&s->port_lock);
- if (gsi == IRQ_MSI_EMU) {
- kvm_update_msi_routes_all(NULL, true, 0, 0);
- }
- return 0;
- }
- int xen_physdev_eoi_pirq(struct physdev_eoi *eoi)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int pirq = eoi->irq;
- int gsi;
- if (!s) {
- return -ENOTSUP;
- }
- BQL_LOCK_GUARD();
- QEMU_LOCK_GUARD(&s->port_lock);
- if (!pirq_inuse(s, pirq)) {
- return -ENOENT;
- }
- gsi = s->pirq[pirq].gsi;
- if (gsi < 0) {
- return -EINVAL;
- }
- /* Reassert a level IRQ if needed */
- if (s->pirq_gsi_set & (1U << gsi)) {
- int port = s->pirq[pirq].port;
- if (port) {
- set_port_pending(s, port);
- }
- }
- return 0;
- }
- int xen_physdev_query_pirq(struct physdev_irq_status_query *query)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int pirq = query->irq;
- if (!s) {
- return -ENOTSUP;
- }
- BQL_LOCK_GUARD();
- QEMU_LOCK_GUARD(&s->port_lock);
- if (!pirq_inuse(s, pirq)) {
- return -ENOENT;
- }
- if (s->pirq[pirq].gsi >= 0) {
- query->flags = XENIRQSTAT_needs_eoi;
- } else {
- query->flags = 0;
- }
- return 0;
- }
- int xen_physdev_get_free_pirq(struct physdev_get_free_pirq *get)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int pirq;
- if (!s) {
- return -ENOTSUP;
- }
- QEMU_LOCK_GUARD(&s->port_lock);
- pirq = allocate_pirq(s, get->type, IRQ_UNBOUND);
- if (pirq < 0) {
- return pirq;
- }
- get->pirq = pirq;
- trace_kvm_xen_get_free_pirq(pirq, get->type);
- return 0;
- }
- struct xenevtchn_handle *xen_be_evtchn_open(void)
- {
- struct xenevtchn_handle *xc = g_new0(struct xenevtchn_handle, 1);
- xc->fd = eventfd(0, EFD_CLOEXEC);
- if (xc->fd < 0) {
- g_free(xc);
- return NULL;
- }
- return xc;
- }
- static int find_be_port(XenEvtchnState *s, struct xenevtchn_handle *xc)
- {
- int i;
- for (i = 1; i < EVTCHN_2L_NR_CHANNELS; i++) {
- if (!s->be_handles[i]) {
- s->be_handles[i] = xc;
- xc->be_port = i;
- return i;
- }
- }
- return 0;
- }
- int xen_be_evtchn_bind_interdomain(struct xenevtchn_handle *xc, uint32_t domid,
- evtchn_port_t guest_port)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- XenEvtchnPort *gp;
- uint16_t be_port = 0;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (!xc) {
- return -EFAULT;
- }
- if (domid != xen_domid) {
- return -ESRCH;
- }
- if (!valid_port(guest_port)) {
- return -EINVAL;
- }
- qemu_mutex_lock(&s->port_lock);
- /* The guest has to have an unbound port waiting for us to bind */
- gp = &s->port_table[guest_port];
- switch (gp->type) {
- case EVTCHNSTAT_interdomain:
- /* Allow rebinding after migration, preserve port # if possible */
- be_port = gp->u.interdomain.port;
- assert(be_port != 0);
- if (!s->be_handles[be_port]) {
- s->be_handles[be_port] = xc;
- xc->guest_port = guest_port;
- ret = xc->be_port = be_port;
- if (kvm_xen_has_cap(EVTCHN_SEND)) {
- assign_kernel_eventfd(gp->type, guest_port, xc->fd);
- }
- break;
- }
- /* fall through */
- case EVTCHNSTAT_unbound:
- be_port = find_be_port(s, xc);
- if (!be_port) {
- ret = -ENOSPC;
- goto out;
- }
- gp->type = EVTCHNSTAT_interdomain;
- gp->u.interdomain.to_qemu = 1;
- gp->u.interdomain.port = be_port;
- xc->guest_port = guest_port;
- if (kvm_xen_has_cap(EVTCHN_SEND)) {
- assign_kernel_eventfd(gp->type, guest_port, xc->fd);
- }
- ret = be_port;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- out:
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_be_evtchn_unbind(struct xenevtchn_handle *xc, evtchn_port_t port)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (!xc) {
- return -EFAULT;
- }
- qemu_mutex_lock(&s->port_lock);
- if (port && port != xc->be_port) {
- ret = -EINVAL;
- goto out;
- }
- if (xc->guest_port) {
- XenEvtchnPort *gp = &s->port_table[xc->guest_port];
- /* This should never *not* be true */
- if (gp->type == EVTCHNSTAT_interdomain) {
- gp->type = EVTCHNSTAT_unbound;
- gp->u.interdomain.port = 0;
- }
- if (kvm_xen_has_cap(EVTCHN_SEND)) {
- deassign_kernel_port(xc->guest_port);
- }
- xc->guest_port = 0;
- }
- s->be_handles[xc->be_port] = NULL;
- xc->be_port = 0;
- ret = 0;
- out:
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_be_evtchn_close(struct xenevtchn_handle *xc)
- {
- if (!xc) {
- return -EFAULT;
- }
- xen_be_evtchn_unbind(xc, 0);
- close(xc->fd);
- g_free(xc);
- return 0;
- }
- int xen_be_evtchn_fd(struct xenevtchn_handle *xc)
- {
- if (!xc) {
- return -1;
- }
- return xc->fd;
- }
- int xen_be_evtchn_notify(struct xenevtchn_handle *xc, evtchn_port_t port)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- int ret;
- if (!s) {
- return -ENOTSUP;
- }
- if (!xc) {
- return -EFAULT;
- }
- qemu_mutex_lock(&s->port_lock);
- if (xc->guest_port) {
- set_port_pending(s, xc->guest_port);
- ret = 0;
- } else {
- ret = -ENOTCONN;
- }
- qemu_mutex_unlock(&s->port_lock);
- return ret;
- }
- int xen_be_evtchn_pending(struct xenevtchn_handle *xc)
- {
- uint64_t val;
- if (!xc) {
- return -EFAULT;
- }
- if (!xc->be_port) {
- return 0;
- }
- if (eventfd_read(xc->fd, &val)) {
- return -errno;
- }
- return val ? xc->be_port : 0;
- }
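- /*
-  * Note that the eventfd is not created with EFD_SEMAPHORE, so the read
-  * above returns the accumulated counter and resets it to zero: a single
-  * read consumes all outstanding notifications, which is why
-  * xen_be_evtchn_unmask() below has nothing left to do.
-  */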
- int xen_be_evtchn_unmask(struct xenevtchn_handle *xc, evtchn_port_t port)
- {
- if (!xc) {
- return -EFAULT;
- }
- if (xc->be_port != port) {
- return -EINVAL;
- }
- /*
- * We don't actually do anything to unmask it; the event was already
- * consumed in xen_be_evtchn_pending().
- */
- return 0;
- }
- int xen_be_evtchn_get_guest_port(struct xenevtchn_handle *xc)
- {
- return xc->guest_port;
- }
- EvtchnInfoList *qmp_xen_event_list(Error **errp)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- EvtchnInfoList *head = NULL, **tail = &head;
- void *shinfo, *pending, *mask;
- int i;
- if (!s) {
- error_setg(errp, "Xen event channel emulation not enabled");
- return NULL;
- }
- shinfo = xen_overlay_get_shinfo_ptr();
- if (!shinfo) {
- error_setg(errp, "Xen shared info page not allocated");
- return NULL;
- }
- if (xen_is_long_mode()) {
- pending = shinfo + offsetof(struct shared_info, evtchn_pending);
- mask = shinfo + offsetof(struct shared_info, evtchn_mask);
- } else {
- pending = shinfo + offsetof(struct compat_shared_info, evtchn_pending);
- mask = shinfo + offsetof(struct compat_shared_info, evtchn_mask);
- }
- QEMU_LOCK_GUARD(&s->port_lock);
- for (i = 0; i < s->nr_ports; i++) {
- XenEvtchnPort *p = &s->port_table[i];
- EvtchnInfo *info;
- if (p->type == EVTCHNSTAT_closed) {
- continue;
- }
- info = g_new0(EvtchnInfo, 1);
- info->port = i;
- qemu_build_assert(EVTCHN_PORT_TYPE_CLOSED == EVTCHNSTAT_closed);
- qemu_build_assert(EVTCHN_PORT_TYPE_UNBOUND == EVTCHNSTAT_unbound);
- qemu_build_assert(EVTCHN_PORT_TYPE_INTERDOMAIN == EVTCHNSTAT_interdomain);
- qemu_build_assert(EVTCHN_PORT_TYPE_PIRQ == EVTCHNSTAT_pirq);
- qemu_build_assert(EVTCHN_PORT_TYPE_VIRQ == EVTCHNSTAT_virq);
- qemu_build_assert(EVTCHN_PORT_TYPE_IPI == EVTCHNSTAT_ipi);
- info->type = p->type;
- if (p->type == EVTCHNSTAT_interdomain) {
- info->remote_domain = g_strdup(p->u.interdomain.to_qemu ?
- "qemu" : "loopback");
- info->target = p->u.interdomain.port;
- } else {
- info->target = p->u.val; /* pirq# or virq# */
- }
- info->vcpu = p->vcpu;
- info->pending = test_bit(i, pending);
- info->masked = test_bit(i, mask);
- QAPI_LIST_APPEND(tail, info);
- }
- return head;
- }
- void qmp_xen_event_inject(uint32_t port, Error **errp)
- {
- XenEvtchnState *s = xen_evtchn_singleton;
- if (!s) {
- error_setg(errp, "Xen event channel emulation not enabled");
- return;
- }
- if (!valid_port(port)) {
- error_setg(errp, "Invalid port %u", port);
- }
- QEMU_LOCK_GUARD(&s->port_lock);
- if (set_port_pending(s, port)) {
- error_setg(errp, "Failed to set port %u", port);
- return;
- }
- }
- void hmp_xen_event_list(Monitor *mon, const QDict *qdict)
- {
- EvtchnInfoList *iter, *info_list;
- Error *err = NULL;
- info_list = qmp_xen_event_list(&err);
- if (err) {
- hmp_handle_error(mon, err);
- return;
- }
- for (iter = info_list; iter; iter = iter->next) {
- EvtchnInfo *info = iter->value;
- monitor_printf(mon, "port %4u: vcpu: %d %s", info->port, info->vcpu,
- EvtchnPortType_str(info->type));
- if (info->type != EVTCHN_PORT_TYPE_IPI) {
- monitor_printf(mon, "(");
- if (info->remote_domain) {
- monitor_printf(mon, "%s:", info->remote_domain);
- }
- monitor_printf(mon, "%d)", info->target);
- }
- if (info->pending) {
- monitor_printf(mon, " PENDING");
- }
- if (info->masked) {
- monitor_printf(mon, " MASKED");
- }
- monitor_printf(mon, "\n");
- }
- qapi_free_EvtchnInfoList(info_list);
- }
- void hmp_xen_event_inject(Monitor *mon, const QDict *qdict)
- {
- int port = qdict_get_int(qdict, "port");
- Error *err = NULL;
- qmp_xen_event_inject(port, &err);
- if (err) {
- hmp_handle_error(mon, err);
- } else {
- monitor_printf(mon, "Delivered port %d\n", port);
- }
- }