Port-amd64 archive
Re: [PATCH] Add intr_mask() / intr_unmask() interface to mask / unmask individual interrupt sources
- To: Jason Thorpe <thorpej%me.com@localhost>
- Subject: Re: [PATCH] Add intr_mask() / intr_unmask() interface to mask / unmask individual interrupt sources
- From: Andrew Doran <ad%netbsd.org@localhost>
- Date: Fri, 20 Dec 2019 17:42:39 +0000
Hi,
On Thu, Dec 19, 2019 at 03:20:00PM +0200, Jason Thorpe wrote:
> +Andrew for his comments as well.
The x86 interrupt changes look okay to me, with one minor nit:
intr_establish_xname, intr_mask_internal
- these don't need the mp_online check in -current; xcall does the same now
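(Editor's note, not part of the original mail: a minimal sketch of the
dispatch pattern the nit refers to, using the names from the patch quoted
below. Per Andrew's remark the !mp_online test could presumably be dropped
in -current; that claim is his and is not verified here.)

	/* Pattern as posted in the patch: */
	if (ci == curcpu() || !mp_online) {
		intr_mask_xcall(ih, mask_arg);
	} else {
		const uint64_t where =
		    xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci);
		xc_wait(where);
	}

	/* Simplification suggested by the nit, if xcall itself copes
	 * with the APs not being online yet: */
	if (ci == curcpu()) {
		intr_mask_xcall(ih, mask_arg);
	} else {
		xc_wait(xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci));
	}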
I've been following the discussion only peripherally, but my one comment is
that it sucks that you actually need to mask an interrupt source in regular
operation in the first place. Doesn't sound like a fun problem. No objection
from me there, though.
Andrew
> > On Dec 1, 2019, at 3:16 AM, Jason Thorpe <thorpej%me.com@localhost> wrote:
> >
> > Ok, so I thought about this some more, and there is a problem there, but not the one you mentioned.
> >
> > intr_mask() is intended to be callable from an interrupt handler, but you can't take the cpu_lock in that case, because that's a MUTEX_DEFAULT mutex. The code can be tweaked to address this issue, but yes, places where the mask count is checked do need to be protected in a block that disables interrupts.
> >
> > I'll post a follow-up patch shortly.
>
> Well, FSVO "shortly". Anyway, I *think* this is the final patch. The main change between this one and the previous is that this one also uses an xcall to perform the "hwunmask" operation. I added the relevant details in code comments, so please make sure to read them.
>
> Again, to recap:
>
> 1- Adds intr_mask() and intr_unmask() functions that take as an argument the cookie returned by intr_establish().
>
> 2- intr_mask() calls can be nested (i.e. there is a mask count).
>
> 3- intr_mask() can be called from interrupt context; intr_unmask() cannot (softint context is OK).
>
> 4- Whereas spl*() masks off a logical interrupt level (e.g. IPL_NET), intr_mask() masks off an individual interrupt source / line.
>
> 5- Wrappers for ACPI are included (following the existing ACPI interrupt code layering model).
>
> 6- Adapts the hid-over-i2c driver, so as to avoid using i2c in interrupt context. I don't have such a device. Please ping me off-list if you have one and would be willing to test a kernel with these changes.
>
> 7- Making all of this stuff work with Xen is up to the people who maintain our Xen code. As of right now, it doesn't, and I don't know what will happen if code on a Xen system tries to call intr_mask().
>
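(Editor's illustration, not part of the patch: a minimal sketch of how a
driver might use the new interface, modeled loosely on the ihidev changes
below. The mydev_* names and softc fields are hypothetical; the real entry
points are intr_mask()/intr_unmask() taking the cookie returned by
intr_establish(), per items 1-3 above.)

	static int
	mydev_intr(void *arg)		/* hard interrupt context */
	{
		struct mydev_softc *sc = arg;

		/*
		 * Defer the real work to a soft interrupt, and hold off
		 * further interrupts from this (level-triggered) source
		 * until that work is done.
		 */
		softint_schedule(sc->sc_sih);
		intr_mask(sc->sc_ih);	/* legal in interrupt context; nests */
		return 1;
	}

	static void
	mydev_softintr(void *arg)	/* soft interrupt context */
	{
		struct mydev_softc *sc = arg;

		mydev_service(sc);	/* may sleep, touch the i2c bus, ... */
		intr_unmask(sc->sc_ih);	/* must not be called from a hard
					 * interrupt handler */
	}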
> diff --git a/sys/arch/amd64/amd64/genassym.cf b/sys/arch/amd64/amd64/genassym.cf
> index 168cae5cb8b5..270a9118d840 100644
> --- a/sys/arch/amd64/amd64/genassym.cf
> +++ b/sys/arch/amd64/amd64/genassym.cf
> @@ -320,7 +320,8 @@ define IS_FLAGS offsetof(struct intrsource, is_flags)
> define IS_PIN offsetof(struct intrsource, is_pin)
> define IS_TYPE offsetof(struct intrsource, is_type)
> define IS_MAXLEVEL offsetof(struct intrsource, is_maxlevel)
> -define IS_LWP offsetof(struct intrsource, is_lwp)
> +define IS_LWP offsetof(struct intrsource, is_lwp)
> +define IS_MASK_COUNT offsetof(struct intrsource, is_mask_count)
>
> define IPL_NONE IPL_NONE
> define IPL_PREEMPT IPL_PREEMPT
> diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
> index 2ef3a7ba3b08..a0ac3fed6866 100644
> --- a/sys/arch/amd64/amd64/vector.S
> +++ b/sys/arch/amd64/amd64/vector.S
> @@ -387,6 +387,8 @@ IDTVEC(handle_ ## name ## num) ;\
> sti ;\
> incl CPUVAR(IDEPTH) ;\
> movq IS_HANDLERS(%r14),%rbx ;\
> + cmpl $0,IS_MASK_COUNT(%r14) /* source currently masked? */ ;\
> + jne 7f /* yes, hold it */ ;\
> 6: \
> movl IH_LEVEL(%rbx),%r12d ;\
> cmpl %r13d,%r12d ;\
> @@ -399,6 +401,8 @@ IDTVEC(handle_ ## name ## num) ;\
> testq %rbx,%rbx ;\
> jnz 6b ;\
> 5: \
> + cmpl $0,IS_MASK_COUNT(%r14) /* source now masked? */ ;\
> + jne 7f /* yes, deal */ ;\
> cli ;\
> unmask(num) /* unmask it in hardware */ ;\
> late_ack(num) ;\
> diff --git a/sys/arch/i386/i386/genassym.cf b/sys/arch/i386/i386/genassym.cf
> index 479c5319fc2f..0002e23d6a5f 100644
> --- a/sys/arch/i386/i386/genassym.cf
> +++ b/sys/arch/i386/i386/genassym.cf
> @@ -319,6 +319,7 @@ define IS_PIN offsetof(struct intrsource, is_pin)
> define IS_TYPE offsetof(struct intrsource, is_type)
> define IS_MAXLEVEL offsetof(struct intrsource, is_maxlevel)
> define IS_LWP offsetof(struct intrsource, is_lwp)
> +define IS_MASK_COUNT offsetof(struct intrsource, is_mask_count)
>
> define IPL_NONE IPL_NONE
> define IPL_PREEMPT IPL_PREEMPT
> diff --git a/sys/arch/i386/i386/vector.S b/sys/arch/i386/i386/vector.S
> index 5ef99df2713c..3c5a625cef49 100644
> --- a/sys/arch/i386/i386/vector.S
> +++ b/sys/arch/i386/i386/vector.S
> @@ -408,6 +408,8 @@ IDTVEC(intr_ ## name ## num) ;\
> IDEPTH_INCR ;\
> sti ;\
> movl IS_HANDLERS(%ebp),%ebx ;\
> + cmpl $0,IS_MASK_COUNT(%ebp) /* source currently masked? */ ;\
> + jne 7f /* yes, hold it */ ;\
> 6: \
> movl IH_LEVEL(%ebx),%edi ;\
> cmpl %esi,%edi ;\
> @@ -420,6 +422,8 @@ IDTVEC(intr_ ## name ## num) ;\
> addl $4,%esp /* toss the arg */ ;\
> testl %ebx,%ebx ;\
> jnz 6b ;\
> + cmpl $0,IS_MASK_COUNT(%ebp) /* source now masked? */ ;\
> + jne 7f /* yes, deal */ ;\
> cli ;\
> unmask(num) /* unmask it in hardware */ ;\
> late_ack(num) ;\
> diff --git a/sys/arch/x86/acpi/acpi_machdep.c b/sys/arch/x86/acpi/acpi_machdep.c
> index 41f39d2b52f0..3519fdf7ba79 100644
> --- a/sys/arch/x86/acpi/acpi_machdep.c
> +++ b/sys/arch/x86/acpi/acpi_machdep.c
> @@ -294,6 +294,18 @@ acpi_md_intr_establish(uint32_t InterruptNumber, int ipl, int type,
> return ih;
> }
>
> +void
> +acpi_md_intr_mask(void *ih)
> +{
> + intr_mask(ih);
> +}
> +
> +void
> +acpi_md_intr_unmask(void *ih)
> +{
> + intr_unmask(ih);
> +}
> +
> void
> acpi_md_intr_disestablish(void *ih)
> {
> diff --git a/sys/arch/x86/include/acpi_machdep.h b/sys/arch/x86/include/acpi_machdep.h
> index 86a7bd36d337..a2277cd5811f 100644
> --- a/sys/arch/x86/include/acpi_machdep.h
> +++ b/sys/arch/x86/include/acpi_machdep.h
> @@ -72,6 +72,8 @@ void acpi_md_OsEnableInterrupt(void);
>
> void * acpi_md_intr_establish(uint32_t, int, int, int (*)(void *),
> void *, bool, const char *);
> +void acpi_md_intr_mask(void *);
> +void acpi_md_intr_unmask(void *);
> void acpi_md_intr_disestablish(void *);
>
> int acpi_md_sleep(int);
> diff --git a/sys/arch/x86/include/intr.h b/sys/arch/x86/include/intr.h
> index 6fb4e9545338..f8d6e69cd24e 100644
> --- a/sys/arch/x86/include/intr.h
> +++ b/sys/arch/x86/include/intr.h
> @@ -1,7 +1,7 @@
> /* $NetBSD: intr.h,v 1.60 2019/02/14 08:18:25 cherry Exp $ */
>
> /*-
> - * Copyright (c) 1998, 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
> + * Copyright (c) 1998, 2001, 2006, 2007, 2008, 2019 The NetBSD Foundation, Inc.
> * All rights reserved.
> *
> * This code is derived from software contributed to The NetBSD Foundation
> @@ -95,6 +95,16 @@ struct intrsource {
> u_long ipl_evt_mask2[NR_EVENT_CHANNELS];
> #endif
> struct evcnt is_evcnt; /* interrupt counter per cpu */
> + /*
> + * is_mask_count requires special handling; it can only be modified
> + * or examined on the CPU that owns the interrupt source, and such
> + * references need to be protected by disabling interrupts. This
> + * is because intr_mask() can be called from an interrupt handler.
> + * is_distribute_pending does not require such special handling
> + * because intr_unmask() cannot be called from an interrupt handler.
> + */
> + u_int is_mask_count; /* masked? (nested) [see above] */
> + int is_distribute_pending; /* ci<->ci move pending [cpu_lock] */
> int is_flags; /* see below */
> int is_type; /* level, edge */
> int is_idtvec;
> @@ -215,6 +225,8 @@ void x86_nmi(void);
> void *intr_establish_xname(int, struct pic *, int, int, int, int (*)(void *),
> void *, bool, const char *);
> void *intr_establish(int, struct pic *, int, int, int, int (*)(void *), void *, bool);
> +void intr_mask(struct intrhand *);
> +void intr_unmask(struct intrhand *);
> void intr_disestablish(struct intrhand *);
> void intr_add_pcibus(struct pcibus_attach_args *);
> const char *intr_string(intr_handle_t, char *, size_t);
> diff --git a/sys/arch/x86/x86/intr.c b/sys/arch/x86/x86/intr.c
> index 3722436e5886..07d678137e39 100644
> --- a/sys/arch/x86/x86/intr.c
> +++ b/sys/arch/x86/x86/intr.c
> @@ -1,11 +1,11 @@
> /* $NetBSD: intr.c,v 1.146 2019/06/17 06:38:30 msaitoh Exp $ */
>
> /*
> - * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
> + * Copyright (c) 2007, 2008, 2009, 2019 The NetBSD Foundation, Inc.
> * All rights reserved.
> *
> * This code is derived from software contributed to The NetBSD Foundation
> - * by Andrew Doran.
> + * by Andrew Doran, and by Jason R. Thorpe.
> *
> * Redistribution and use in source and binary forms, with or without
> * modification, are permitted provided that the following conditions
> @@ -739,6 +739,34 @@ intr_append_intrsource_xname(struct intrsource *isp, const char *xname)
> strlcat(isp->is_xname, xname, sizeof(isp->is_xname));
> }
>
> +/*
> + * Called on bound CPU to handle calling pic_hwunmask from contexts
> + * that are not already running on the bound CPU.
> + *
> + * => caller (on initiating CPU) holds cpu_lock on our behalf
> + * => arg1: struct intrhand *ih
> + */
> +static void
> +intr_hwunmask_xcall(void *arg1, void *arg2)
> +{
> + struct intrhand * const ih = arg1;
> + struct cpu_info * const ci = ih->ih_cpu;
> +
> + KASSERT(ci == curcpu() || !mp_online);
> +
> + const u_long psl = x86_read_psl();
> + x86_disable_intr();
> +
> + struct intrsource * const source = ci->ci_isources[ih->ih_slot];
> + struct pic * const pic = source->is_pic;
> +
> + if (source->is_mask_count == 0) {
> + (*pic->pic_hwunmask)(pic, ih->ih_pin);
> + }
> +
> + x86_write_psl(psl);
> +}
> +
> /*
> * Handle per-CPU component of interrupt establish.
> *
> @@ -955,7 +983,12 @@ intr_establish_xname(int legacy_irq, struct pic *pic, int pin, int type,
>
> /* All set up, so add a route for the interrupt and unmask it. */
> (*pic->pic_addroute)(pic, ci, pin, idt_vec, type);
> - (*pic->pic_hwunmask)(pic, pin);
> + if (ci == curcpu() || !mp_online) {
> + intr_hwunmask_xcall(ih, NULL);
> + } else {
> + where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, ci);
> + xc_wait(where);
> + }
> mutex_exit(&cpu_lock);
>
> if (bootverbose || cpu_index(ci) != 0)
> @@ -976,6 +1009,118 @@ intr_establish(int legacy_irq, struct pic *pic, int pin, int type, int level,
> level, handler, arg, known_mpsafe, "unknown");
> }
>
> +/*
> + * Called on bound CPU to handle intr_mask() / intr_unmask().
> + *
> + * => caller (on initiating CPU) holds cpu_lock on our behalf
> + * => arg1: struct intrhand *ih
> + * => arg2: true -> mask, false -> unmask.
> + */
> +static void
> +intr_mask_xcall(void *arg1, void *arg2)
> +{
> + struct intrhand * const ih = arg1;
> + const uintptr_t mask = (uintptr_t)arg2;
> + struct cpu_info * const ci = ih->ih_cpu;
> + bool force_pending = false;
> +
> + KASSERT(ci == curcpu() || !mp_online);
> +
> + /*
> + * We need to disable interrupts to hold off the interrupt
> + * vectors.
> + */
> + const u_long psl = x86_read_psl();
> + x86_disable_intr();
> +
> + struct intrsource * const source = ci->ci_isources[ih->ih_slot];
> + struct pic * const pic = source->is_pic;
> +
> + if (mask) {
> + source->is_mask_count++;
> + KASSERT(source->is_mask_count != 0);
> + if (source->is_mask_count == 1) {
> + (*pic->pic_hwmask)(pic, ih->ih_pin);
> + }
> + } else {
> + KASSERT(source->is_mask_count != 0);
> + if (--source->is_mask_count == 0) {
> + /*
> + * If this interrupt source is being moved, don't
> + * unmask it at the hw.
> + */
> + if (! source->is_distribute_pending)
> + (*pic->pic_hwunmask)(pic, ih->ih_pin);
> + force_pending = true;
> + }
> + }
> +
> + /* Re-enable interrupts. */
> + x86_write_psl(psl);
> +
> + if (force_pending) {
> + /* Force processing of any pending interrupts. */
> + splx(splhigh());
> + }
> +}
> +
> +static void
> +intr_mask_internal(struct intrhand * const ih, const bool mask)
> +{
> +
> + /*
> + * Call out to the remote CPU to update its interrupt state.
> + * Only make RPCs if the APs are up and running.
> + */
> + mutex_enter(&cpu_lock);
> + struct cpu_info * const ci = ih->ih_cpu;
> + void * const mask_arg = (void *)(uintptr_t)mask;
> + if (ci == curcpu() || !mp_online) {
> + intr_mask_xcall(ih, mask_arg);
> + } else {
> + const uint64_t where =
> + xc_unicast(0, intr_mask_xcall, ih, mask_arg, ci);
> + xc_wait(where);
> + }
> + mutex_exit(&cpu_lock);
> +}
> +
> +void
> +intr_mask(struct intrhand *ih)
> +{
> +
> + if (cpu_intr_p()) {
> + /*
> + * Special case of calling intr_mask() from an interrupt
> + * handler: we MUST be called from the bound CPU for this
> + * interrupt (presumably from a handler we're about to
> + * mask).
> + *
> + * We can't take the cpu_lock in this case, and we must
> + * therefore be extra careful.
> + */
> + struct cpu_info * const ci = ih->ih_cpu;
> + KASSERT(ci == curcpu() || !mp_online);
> + intr_mask_xcall(ih, (void *)(uintptr_t)true);
> + return;
> + }
> +
> + intr_mask_internal(ih, true);
> +}
> +
> +void
> +intr_unmask(struct intrhand *ih)
> +{
> +
> + /*
> + * This is not safe to call from an interrupt context because
> + * we don't want to accidentally unmask an interrupt source
> + * that's masked because it's being serviced.
> + */
> + KASSERT(!cpu_intr_p());
> + intr_mask_internal(ih, false);
> +}
> +
> /*
> * Called on bound CPU to handle intr_disestablish().
> *
> @@ -1036,7 +1181,7 @@ intr_disestablish_xcall(void *arg1, void *arg2)
> if (source->is_handlers == NULL)
> (*pic->pic_delroute)(pic, ci, ih->ih_pin, idtvec,
> source->is_type);
> - else
> + else if (source->is_mask_count == 0)
> (*pic->pic_hwunmask)(pic, ih->ih_pin);
>
> /* Re-enable interrupts. */
> @@ -1851,10 +1996,14 @@ intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset)
> return err;
> }
>
> + /* Prevent intr_unmask() from reenabling the source at the hw. */
> + isp->is_distribute_pending = true;
> +
> pin = isp->is_pin;
> (*pic->pic_hwmask)(pic, pin); /* for ci_ipending check */
> - while (oldci->ci_ipending & (1 << oldslot))
> + while (oldci->ci_ipending & (1 << oldslot)) {
> (void)kpause("intrdist", false, 1, &cpu_lock);
> + }
>
> kpreempt_disable();
>
> @@ -1889,9 +2038,16 @@ intr_set_affinity(struct intrsource *isp, const kcpuset_t *cpuset)
> isp->is_active_cpu = newci->ci_cpuid;
> (*pic->pic_addroute)(pic, newci, pin, idt_vec, isp->is_type);
>
> - kpreempt_enable();
> + isp->is_distribute_pending = false;
> + if (newci == curcpu() || !mp_online) {
> + intr_hwunmask_xcall(ih, NULL);
> + } else {
> + uint64_t where;
> + where = xc_unicast(0, intr_hwunmask_xcall, ih, NULL, newci);
> + xc_wait(where);
> + }
>
> - (*pic->pic_hwunmask)(pic, pin);
> + kpreempt_enable();
>
> return err;
> }
> diff --git a/sys/dev/acpi/acpi_intr.h b/sys/dev/acpi/acpi_intr.h
> index 5d2d8d47c0a3..6001204a8d2b 100644
> --- a/sys/dev/acpi/acpi_intr.h
> +++ b/sys/dev/acpi/acpi_intr.h
> @@ -28,7 +28,15 @@
> * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> * POSSIBILITY OF SUCH DAMAGE.
> */
> +
> +#ifndef _DEV_ACPI_ACPI_INTR_H_
> +#define _DEV_ACPI_ACPI_INTR_H_
> +
> void * acpi_intr_establish(device_t, uint64_t, int, bool,
> int (*intr)(void *), void *, const char *);
> +void acpi_intr_mask(void *);
> +void acpi_intr_unmask(void *);
> void acpi_intr_disestablish(void *);
> const char * acpi_intr_string(void *, char *, size_t len);
> +
> +#endif /* _DEV_ACPI_ACPI_INTR_H_ */
> diff --git a/sys/dev/acpi/acpi_util.c b/sys/dev/acpi/acpi_util.c
> index aac787211eee..fdec7ad89be9 100644
> --- a/sys/dev/acpi/acpi_util.c
> +++ b/sys/dev/acpi/acpi_util.c
> @@ -590,6 +590,22 @@ end:
> return aih;
> }
>
> +void
> +acpi_intr_mask(void *c)
> +{
> + struct acpi_irq_handler * const aih = c;
> +
> + acpi_md_intr_mask(aih->aih_ih);
> +}
> +
> +void
> +acpi_intr_unmask(void *c)
> +{
> + struct acpi_irq_handler * const aih = c;
> +
> + acpi_md_intr_unmask(aih->aih_ih);
> +}
> +
> void
> acpi_intr_disestablish(void *c)
> {
> diff --git a/sys/dev/i2c/ihidev.c b/sys/dev/i2c/ihidev.c
> index 1e0956295795..8d7cb2a51b0d 100644
> --- a/sys/dev/i2c/ihidev.c
> +++ b/sys/dev/i2c/ihidev.c
> @@ -71,6 +71,7 @@ __KERNEL_RCSID(0, "$NetBSD: ihidev.c,v 1.7 2018/11/16 23:05:50 jmcneill Exp $");
> # include "acpica.h"
> #endif
> #if NACPICA > 0
> +#include <dev/acpi/acpivar.h>
> #include <dev/acpi/acpi_intr.h>
> #endif
>
> @@ -109,10 +110,14 @@ static int ihidev_detach(device_t, int);
> CFATTACH_DECL_NEW(ihidev, sizeof(struct ihidev_softc),
> ihidev_match, ihidev_attach, ihidev_detach, NULL);
>
> +static bool ihiddev_intr_init(struct ihidev_softc *);
> +static void ihiddev_intr_fini(struct ihidev_softc *);
> +
> static bool ihidev_suspend(device_t, const pmf_qual_t *);
> static bool ihidev_resume(device_t, const pmf_qual_t *);
> static int ihidev_hid_command(struct ihidev_softc *, int, void *, bool);
> static int ihidev_intr(void *);
> +static void ihidev_softintr(void *);
> static int ihidev_reset(struct ihidev_softc *, bool);
> static int ihidev_hid_desc_parse(struct ihidev_softc *);
>
> @@ -204,18 +209,9 @@ ihidev_attach(device_t parent, device_t self, void *aux)
> repsz));
> }
> sc->sc_ibuf = kmem_zalloc(sc->sc_isize, KM_NOSLEEP);
> -#if NACPICA > 0
> - {
> - char buf[100];
> -
> - sc->sc_ih = acpi_intr_establish(self, sc->sc_phandle, IPL_TTY,
> - false, ihidev_intr, sc, device_xname(self));
> - if (sc->sc_ih == NULL)
> - aprint_error_dev(self, "can't establish interrupt\n");
> - aprint_normal_dev(self, "interrupting at %s\n",
> - acpi_intr_string(sc->sc_ih, buf, sizeof(buf)));
> + if (! ihiddev_intr_init(sc)) {
> + return;
> }
> -#endif
>
> iha.iaa = ia;
> iha.parent = sc;
> @@ -262,10 +258,7 @@ ihidev_detach(device_t self, int flags)
> struct ihidev_softc *sc = device_private(self);
>
> mutex_enter(&sc->sc_intr_lock);
> -#if NACPICA > 0
> - if (sc->sc_ih != NULL)
> - acpi_intr_disestablish(sc->sc_ih);
> -#endif
> + ihiddev_intr_fini(sc);
> if (ihidev_hid_command(sc, I2C_HID_CMD_SET_POWER,
> &I2C_HID_POWER_OFF, true))
> aprint_error_dev(sc->sc_dev, "failed to power down\n");
> @@ -651,31 +644,110 @@ ihidev_hid_desc_parse(struct ihidev_softc *sc)
> return (0);
> }
>
> +static bool
> +ihiddev_intr_init(struct ihidev_softc *sc)
> +{
> +#if NACPICA > 0
> + ACPI_HANDLE hdl = (void *)(uintptr_t)sc->sc_phandle;
> + struct acpi_resources res;
> + ACPI_STATUS rv;
> + char buf[100];
> +
> + rv = acpi_resource_parse(sc->sc_dev, hdl, "_CRS", &res,
> + &acpi_resource_parse_ops_quiet);
> + if (ACPI_FAILURE(rv)) {
> + aprint_error_dev(sc->sc_dev, "can't parse '_CRS'\n");
> + return false;
> + }
> +
> + const struct acpi_irq * const irq = acpi_res_irq(&res, 0);
> + if (irq == NULL) {
> + aprint_error_dev(sc->sc_dev, "no IRQ resource\n");
> + acpi_resource_cleanup(&res);
> + return false;
> + }
> +
> + sc->sc_intr_type =
> + irq->ar_type == ACPI_EDGE_SENSITIVE ? IST_EDGE : IST_LEVEL;
> +
> + acpi_resource_cleanup(&res);
> +
> + sc->sc_ih = acpi_intr_establish(sc->sc_dev, sc->sc_phandle, IPL_TTY,
> + false, ihidev_intr, sc, device_xname(sc->sc_dev));
> + if (sc->sc_ih == NULL) {
> + aprint_error_dev(sc->sc_dev, "can't establish interrupt\n");
> + return false;
> + }
> + aprint_normal_dev(sc->sc_dev, "interrupting at %s\n",
> + acpi_intr_string(sc->sc_ih, buf, sizeof(buf)));
> +
> + sc->sc_sih = softint_establish(SOFTINT_SERIAL, ihidev_softintr, sc);
> + if (sc->sc_sih == NULL) {
> + aprint_error_dev(sc->sc_dev,
> + "can't establish soft interrupt\n");
> + return false;
> + }
> +
> + return true;
> +#else
> + aprint_error_dev(sc->sc_dev, "can't establish interrupt\n");
> + return false;
> +#endif
> +}
> +
> +static void
> +ihiddev_intr_fini(struct ihidev_softc *sc)
> +{
> +#if NACPICA > 0
> + if (sc->sc_ih != NULL) {
> + acpi_intr_disestablish(sc->sc_ih);
> + }
> + if (sc->sc_sih != NULL) {
> + softint_disestablish(sc->sc_sih);
> + }
> +#endif
> +}
> +
> static int
> ihidev_intr(void *arg)
> {
> - struct ihidev_softc *sc = arg;
> + struct ihidev_softc * const sc = arg;
> +
> + mutex_enter(&sc->sc_intr_lock);
> +
> + /*
> + * Schedule our soft interrupt handler. If we're using a level-
> + * triggered interrupt, we have to mask it off while we wait
> + * for service.
> + */
> + softint_schedule(sc->sc_sih);
> + if (sc->sc_intr_type == IST_LEVEL) {
> +#if NACPICA > 0
> + acpi_intr_mask(sc->sc_ih);
> +#endif
> + }
> +
> + mutex_exit(&sc->sc_intr_lock);
> +
> + return 1;
> +}
> +
> +static void
> +ihidev_softintr(void *arg)
> +{
> + struct ihidev_softc * const sc = arg;
> struct ihidev *scd;
> u_int psize;
> int res, i;
> u_char *p;
> u_int rep = 0;
>
> - /*
> - * XXX: force I2C_F_POLL for now to avoid dwiic interrupting
> - * while we are interrupting
> - */
> -
> - mutex_enter(&sc->sc_intr_lock);
> - iic_acquire_bus(sc->sc_tag, I2C_F_POLL);
> -
> + iic_acquire_bus(sc->sc_tag, 0);
> res = iic_exec(sc->sc_tag, I2C_OP_READ_WITH_STOP, sc->sc_addr, NULL, 0,
> - sc->sc_ibuf, sc->sc_isize, I2C_F_POLL);
> -
> - iic_release_bus(sc->sc_tag, I2C_F_POLL);
> - mutex_exit(&sc->sc_intr_lock);
> + sc->sc_ibuf, sc->sc_isize, 0);
> + iic_release_bus(sc->sc_tag, 0);
> if (res != 0)
> - return 1;
> + goto out;
>
> /*
> * 6.1.1 - First two bytes are the packet length, which must be less
> @@ -685,7 +757,7 @@ ihidev_intr(void *arg)
> if (!psize || psize > sc->sc_isize) {
> DPRINTF(("%s: %s: invalid packet size (%d vs. %d)\n",
> sc->sc_dev.dv_xname, __func__, psize, sc->sc_isize));
> - return (1);
> + goto out;
> }
>
> /* 3rd byte is the report id */
> @@ -697,22 +769,30 @@ ihidev_intr(void *arg)
> if (rep >= sc->sc_nrepid) {
> aprint_error_dev(sc->sc_dev, "%s: bad report id %d\n",
> __func__, rep);
> - return (1);
> + goto out;
> }
>
> - DPRINTF(("%s: ihidev_intr: hid input (rep %d):", sc->sc_dev.dv_xname,
> - rep));
> + DPRINTF(("%s: %s: hid input (rep %d):", sc->sc_dev.dv_xname,
> + __func__, rep));
> for (i = 0; i < sc->sc_isize; i++)
> DPRINTF((" %.2x", sc->sc_ibuf[i]));
> DPRINTF(("\n"));
>
> scd = sc->sc_subdevs[rep];
> if (scd == NULL || !(scd->sc_state & IHIDEV_OPEN))
> - return (1);
> + goto out;
>
> scd->sc_intr(scd, p, psize);
>
> - return 1;
> + out:
> + /*
> + * If our interrupt is level-triggered, re-enable it now.
> + */
> + if (sc->sc_intr_type == IST_LEVEL) {
> +#if NACPICA > 0
> + acpi_intr_unmask(sc->sc_ih);
> +#endif
> + }
> }
>
> static int
> diff --git a/sys/dev/i2c/ihidev.h b/sys/dev/i2c/ihidev.h
> index 6685c307fe6e..911cfea6259c 100644
> --- a/sys/dev/i2c/ihidev.h
> +++ b/sys/dev/i2c/ihidev.h
> @@ -100,7 +100,9 @@ struct ihidev_softc {
> uint64_t sc_phandle;
>
> void * sc_ih;
> + void * sc_sih;
> kmutex_t sc_intr_lock;
> + int sc_intr_type;
>
> u_int sc_hid_desc_addr;
> union {
>
> -- thorpej
>