tech-x11 archive


vmwgfx drm driver patches



Hello,

attached is a patch that enables the vmwgfx drm driver on NetBSD.

Unfortunately it does not work correctly yet, and I have run out of
skills to make further progress with it. Hopefully someone can pick
this work up and make it actually work.

The patch touches genfb and rasops in addition to external/bsd/drm2.
Due to recent rasops changes the patch may not apply cleanly.
There are also quite likely some questionable changes to the common
drm2 code, in parts that were previously unused.


The rasops changes are needed because writes to the vmwgfx device
framebuffer do not become visible automatically; the device has to be
kicked so that it knows the framebuffer is dirty.
For this I added a dirty callback to rasops and genfb; a usage sketch
follows below. Maybe there is a better way to implement this.
The current implementation also does not handle framebuffer mmaps to
userland: writes through such mappings will not trigger the dirty
flushes.
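
Roughly, a driver opts in by setting the new genfb_dirty member of its
struct genfb_ops before calling genfb_attach(); rasops then calls back
after every drawing operation. A minimal sketch of the driver side
(the myfb_* names are hypothetical; genfb_ops, genfb_attach() and the
new genfb_dirty hook added by this patch are the real pieces):

struct myfb_softc {
	struct genfb_softc sc_gen;	/* first member, so the cast below works */
	/* ... state needed to kick the device ... */
};

static void
myfb_dirty(void *cookie)
{
	/*
	 * genfb passes its own softc; assumes the driver softc embeds
	 * struct genfb_softc as its first member, as genfb-based
	 * drivers commonly do.
	 */
	struct myfb_softc *sc = cookie;

	/* device-specific flush; the patch adds vmw_fb_dirty() for vmwgfx */
	myfb_flush(sc);
}

	/* in the attach path: */
	ops.genfb_dirty = myfb_dirty;
	genfb_attach(&sc->sc_gen, &ops);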


Status of the driver, as tested with VirtualBox 6.0.4 using the VMSVGA
display: vmwgfx attaches, vmwgfxfb attaches, and kernel boot messages
are drawn to the framebuffer and are visible. But after the kernel
leaves cold, the next write to the framebuffer memory causes VirtualBox
to crash with VCPU0: Guru Meditation -1607 (VERR_PGM_HANDLER_NOT_FOUND).



Here are the bits I know about the VirtualBox crash:

- the crash occurs in rasops_eraserows, when writing to the first word
  of the framebuffer

- but the crash occurs late in boot, and by that point the framebuffer
  memory has already been written to several times

- using a serial console, a few printfs in pmap, and a vtophys call in
  rasops, I verified that the framebuffer's virtual address mapping and
  protection are the same when framebuffer writes still work and when
  the crash occurs (a sketch of this check follows the list)

- the framebuffer physical address and the VirtualBox log file agree
  that the framebuffer does point to VGA VRAM
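
A minimal sketch of the mapping part of that check (not the exact code
used; pmap_extract() and PRIxPADDR are stock NetBSD kernel interfaces,
ri is the struct rasops_info):

	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vaddr_t)ri->ri_bits, &pa))
		printf("fb va %p -> pa %#" PRIxPADDR "\n",
		    (void *)ri->ri_bits, pa);
	else
		printf("fb va %p is not mapped\n", (void *)ri->ri_bits);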

So the memory mappings are correct and stay correct on the guest side,
but at the time of the crash VirtualBox no longer accepts writes to
that memory. It seems like something disables the use of VGA VRAM at
the end of autoconfiguration, perhaps with the first interrupt or
softint after cold, but I couldn't figure out what that could be.
printfs in the vmwgfx softint and irq handlers did not trigger (checked
over the serial console), so it does not appear to be anything the
vmwgfx driver itself does.


Arto
diff --git a/src/sys/dev/rasops/rasops.c b/src/sys/dev/rasops/rasops.c
index 884d4de6..69aa0713 100644
--- a/src/sys/dev/rasops/rasops.c
+++ b/src/sys/dev/rasops/rasops.c
@@ -143,6 +143,13 @@ static int	rasops_allocattr_mono(void *, int, int, int, long *);
 static void	rasops_do_cursor(struct rasops_info *);
 static void	rasops_init_devcmap(struct rasops_info *);
 
+static void rasops_cursor_dirty(void *, int, int, int);
+static void rasops_putchar_dirty(void *, int, int, u_int, long);
+static void rasops_copycols_dirty(void *, int, int, int, int);
+static void rasops_erasecols_dirty(void *, int, int, int, long);
+static void rasops_copyrows_dirty(void *, int, int, int);
+static void rasops_eraserows_dirty(void *, int, int, long);
+
 #if NRASOPS_ROTATION > 0
 static void	rasops_rotate_font(int *, int);
 static void	rasops_copychar(void *, int, int, int, int);
@@ -468,8 +475,17 @@ rasops_reconfig(struct rasops_info *ri, int wantrows, int wantcols)
 		return (-1);
 	}
 
+	if (ri->ri_dirty) {
+		ri->ri_real_ops = ri->ri_ops;
+		ri->ri_ops.cursor = rasops_cursor_dirty;
+		ri->ri_ops.putchar = rasops_putchar_dirty;
+		ri->ri_ops.copycols = rasops_copycols_dirty;
+		ri->ri_ops.erasecols = rasops_erasecols_dirty;
+		ri->ri_ops.copyrows = rasops_copyrows_dirty;
+		ri->ri_ops.eraserows = rasops_eraserows_dirty;
+	}
 #if NRASOPS_ROTATION > 0
-	if (ri->ri_flg & RI_ROTATE_MASK) {
+	else if (ri->ri_flg & RI_ROTATE_MASK) {
 		if (ri->ri_flg & RI_ROTATE_CW) {
 			ri->ri_real_ops = ri->ri_ops;
 			ri->ri_ops.copycols = rasops_copycols_rotated_cw;
@@ -1758,3 +1774,117 @@ rasops_get_cmap(struct rasops_info *ri, uint8_t *palette, size_t bytes)
 	}
 	return 0;
 }
+
+static void
+rasops_cursor_dirty(void *cookie, int on, int row, int col)
+{
+	struct rasops_info *ri = cookie;
+
+	ri->ri_real_ops.cursor(cookie, on, row, col);
+	ri->ri_dirty(ri);
+}
+
+static void
+rasops_putchar_dirty(void *cookie, int row, int col, u_int uc, long attr)
+{
+	struct rasops_info *ri = cookie;
+
+#if NRASOPS_ROTATION > 0
+	if (ri->ri_flg & RI_ROTATE_MASK) {
+		if (ri->ri_flg & RI_ROTATE_CW) {
+			rasops_putchar_rotated_cw(cookie, row, col, uc, attr);
+		} else if (ri->ri_flg & RI_ROTATE_CCW) {
+			rasops_putchar_rotated_ccw(cookie, row, col, uc, attr);
+		} else
+			ri->ri_real_ops.putchar(cookie, row, col, uc, attr);
+	} else
+		ri->ri_real_ops.putchar(cookie, row, col, uc, attr);
+#else
+	ri->ri_real_ops.putchar(cookie, row, col, uc, attr);
+#endif
+	ri->ri_dirty(ri);
+}
+
+static void
+rasops_copycols_dirty(void *cookie, int row, int src, int dst, int num)
+{
+	struct rasops_info *ri = cookie;
+
+#if NRASOPS_ROTATION > 0
+	if (ri->ri_flg & RI_ROTATE_MASK) {
+		if (ri->ri_flg & RI_ROTATE_CW) {
+			rasops_copycols_rotated_cw(cookie, row, src, dst, num);
+		} else if (ri->ri_flg & RI_ROTATE_CCW) {
+			rasops_copycols_rotated_ccw(cookie, row, src, dst, num);
+		} else
+			ri->ri_real_ops.copycols(cookie, row, src, dst, num);
+	} else
+		ri->ri_real_ops.copycols(cookie, row, src, dst, num);
+#else
+	ri->ri_real_ops.copycols(cookie, row, src, dst, num);
+#endif
+	ri->ri_dirty(ri);
+}
+
+static void
+rasops_erasecols_dirty(void *cookie, int row, int col, int num, long attr)
+{
+	struct rasops_info *ri = cookie;
+
+#if NRASOPS_ROTATION > 0
+	if (ri->ri_flg & RI_ROTATE_MASK) {
+		if (ri->ri_flg & RI_ROTATE_CW) {
+			rasops_erasecols_rotated_cw(cookie, row, col, num, attr);
+		} else if (ri->ri_flg & RI_ROTATE_CCW) {
+			rasops_erasecols_rotated_ccw(cookie, row, col, num, attr);
+		} else
+			ri->ri_real_ops.erasecols(cookie, row, col, num, attr);
+	} else
+		ri->ri_real_ops.erasecols(cookie, row, col, num, attr);
+#else
+	ri->ri_real_ops.erasecols(cookie, row, col, num, attr);
+#endif
+	ri->ri_dirty(ri);
+}
+
+static void
+rasops_copyrows_dirty(void *cookie, int src, int dst, int num)
+{
+	struct rasops_info *ri = cookie;
+
+#if NRASOPS_ROTATION > 0
+	if (ri->ri_flg & RI_ROTATE_MASK) {
+		if (ri->ri_flg & RI_ROTATE_CW) {
+			rasops_copyrows_rotated_cw(cookie, src, dst, num);
+		} else if (ri->ri_flg & RI_ROTATE_CCW) {
+			rasops_copyrows_rotated_ccw(cookie, src, dst, num);
+		} else
+			ri->ri_real_ops.copyrows(cookie, src, dst, num);
+	} else
+		ri->ri_real_ops.copyrows(cookie, src, dst, num);
+#else
+	ri->ri_real_ops.copyrows(cookie, src, dst, num);
+#endif
+	ri->ri_dirty(ri);
+}
+
+static void
+rasops_eraserows_dirty(void *cookie, int row, int num, long attr)
+{
+	struct rasops_info *ri = cookie;
+
+#if NRASOPS_ROTATION > 0
+	if (ri->ri_flg & RI_ROTATE_MASK) {
+		if (ri->ri_flg & RI_ROTATE_CW) {
+			rasops_eraserows_rotated_cw(cookie, row, num, attr);
+		} else if (ri->ri_flg & RI_ROTATE_CCW) {
+			rasops_eraserows_rotated_ccw(cookie, row, num, attr);
+		} else
+			ri->ri_real_ops.eraserows(cookie, row, num, attr);
+	} else
+		ri->ri_real_ops.eraserows(cookie, row, num, attr);
+#else
+	ri->ri_real_ops.eraserows(cookie, row, num, attr);
+#endif
+	ri->ri_dirty(ri);
+}
diff --git a/src/sys/dev/rasops/rasops.h b/src/sys/dev/rasops/rasops.h
index fa348af9..3000c2ae 100644
--- a/src/sys/dev/rasops/rasops.h
+++ b/src/sys/dev/rasops/rasops.h
@@ -91,6 +91,7 @@ struct rasops_info {
 	struct	wsdisplay_font ri_optfont;
 	int	ri_wsfcookie;	/* wsfont cookie */
 	void	*ri_hw;		/* driver private data; ignored by rasops */
+	void	*ri_device;	/* device private data (vcons abuses ri_hw) */
 	int	ri_crow;	/* cursor row */
 	int	ri_ccol;	/* cursor column */
 	int	ri_flg;		/* various operational flags */
@@ -132,10 +133,11 @@ struct rasops_info {
 	/* Callbacks so we can share some code */
 	void	(*ri_do_cursor)(struct rasops_info *);
 
-#if NRASOPS_ROTATION > 0
-	/* Used to intercept putchar to permit display rotation */
+	/* notify the driver that the screen is dirty */
+	void	(*ri_dirty)(struct rasops_info *);
+
+	/* Used to intercept emulops to permit display rotation or dirty handling */
 	struct	wsdisplay_emulops ri_real_ops;
-#endif
 };
 
 #define DELTA(p, d, cast) ((p) = (cast)((char *)(p) + (d)))
diff --git a/src/sys/dev/wsfb/genfb.c b/src/sys/dev/wsfb/genfb.c
index 82674d08..a3df9e56 100644
--- a/src/sys/dev/wsfb/genfb.c
+++ b/src/sys/dev/wsfb/genfb.c
@@ -90,6 +90,8 @@ static void	genfb_init_palette(struct genfb_softc *);
 static void	genfb_brightness_up(device_t);
 static void	genfb_brightness_down(device_t);
 
+static void genfb_rasops_dirty(struct rasops_info *);
+
 extern const u_char rasops_cmap[768];
 
 static int genfb_cnattach_called = 0;
@@ -593,6 +595,9 @@ genfb_init_screen(void *cookie, struct vcons_screen *scr,
 
 	wantcols = genfb_calc_cols(sc);
 
+	if (sc->sc_ops.genfb_dirty)
+		ri->ri_dirty = genfb_rasops_dirty;
+
 	rasops_init(ri, 0, wantcols);
 	ri->ri_caps = WSSCREEN_WSCOLORS | WSSCREEN_HILIT | WSSCREEN_UNDERLINE |
 		  WSSCREEN_RESIZE;
@@ -601,6 +606,8 @@ genfb_init_screen(void *cookie, struct vcons_screen *scr,
 
 	/* TODO: actually center output */
 	ri->ri_hw = scr;
+	/* would use ri_hw, but vcons expects it to point to the vcons_screen */
+	ri->ri_device = sc;
 
 #ifdef GENFB_DISABLE_TEXT
 	if (scr == &sc->sc_console_screen && !DISABLESPLASH)
@@ -852,3 +859,11 @@ genfb_disable_polling(device_t dev)
 		vcons_disable_polling(&sc->vd);
 	}
 }
+
+static void
+genfb_rasops_dirty(struct rasops_info *ri)
+{
+	struct genfb_softc *sc = ri->ri_device;
+
+	sc->sc_ops.genfb_dirty(sc);
+}
diff --git a/src/sys/dev/wsfb/genfbvar.h b/src/sys/dev/wsfb/genfbvar.h
index ad35e163..a2999cbe 100644
--- a/src/sys/dev/wsfb/genfbvar.h
+++ b/src/sys/dev/wsfb/genfbvar.h
@@ -62,6 +62,7 @@ struct genfb_ops {
 	int (*genfb_borrow)(void *, bus_addr_t, bus_space_handle_t *);
 	int (*genfb_enable_polling)(void *);
 	int (*genfb_disable_polling)(void *);
+	void (*genfb_dirty)(void *);
 };
 
 struct genfb_colormap_callback {
diff --git a/src/sys/external/bsd/common/include/linux/kernel.h b/src/sys/external/bsd/common/include/linux/kernel.h
index cb53794e..4a1cb329 100644
--- a/src/sys/external/bsd/common/include/linux/kernel.h
+++ b/src/sys/external/bsd/common/include/linux/kernel.h
@@ -44,6 +44,8 @@
 
 #define U16_MAX UINT16_MAX
 #define U32_MAX UINT32_MAX
+#define S32_MAX INT32_MAX
+#define S32_MIN INT32_MIN
 #define U64_MAX UINT64_MAX
 
 #define	oops_in_progress	(panicstr != NULL)
diff --git a/src/sys/external/bsd/drm2/dist/drm/i915/intel_fbdev.c b/src/sys/external/bsd/drm2/dist/drm/i915/intel_fbdev.c
index b7fe0908..347a8b9c 100644
--- a/src/sys/external/bsd/drm2/dist/drm/i915/intel_fbdev.c
+++ b/src/sys/external/bsd/drm2/dist/drm/i915/intel_fbdev.c
@@ -215,8 +215,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
 	mutex_lock(&dev->struct_mutex);
 	if (intel_fb &&
-	    (sizes->fb_width > intel_fb->base.width ||
-	     sizes->fb_height > intel_fb->base.height)) {
+	    (sizes->surface_width > intel_fb->base.width ||
+	     sizes->surface_height > intel_fb->base.height)) {
 		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
 			      " releasing it\n",
 			      intel_fb->base.width, intel_fb->base.height,
diff --git a/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c b/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c
index 44f05967..3f561a63 100644
--- a/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c
+++ b/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_bo.c
@@ -1644,9 +1644,12 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 		unsigned i;
 
 		mutex_enter(bo->uvmobj.vmobjlock);
-		for (i = 0; i < bo->ttm->num_pages; i++)
-			pmap_page_protect(&bo->ttm->pages[i]->p_vmp,
-			    VM_PROT_NONE);
+		for (i = 0; i < bo->ttm->num_pages; i++) {
+			if (bo->ttm->pages[i]) {
+				pmap_page_protect(&bo->ttm->pages[i]->p_vmp,
+				    VM_PROT_NONE);
+			}
+		}
 		mutex_exit(bo->uvmobj.vmobjlock);
 	}
 #else
diff --git a/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_object.c b/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_object.c
index 6f911470..4f4fa639 100644
--- a/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_object.c
+++ b/src/sys/external/bsd/drm2/dist/drm/ttm/ttm_object.c
@@ -42,6 +42,9 @@
  * and release on file close.
  */
 
+/*
+ * NetBSD: modified to not use RCU; read operations take the lock instead.
+ */
 
 /**
  * struct ttm_object_file
@@ -70,6 +73,9 @@ __KERNEL_RCSID(0, "$NetBSD: ttm_object.c,v 1.2 2018/08/27 04:58:37 riastradh Exp
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
+#ifdef __NetBSD__
+#include <linux/err.h>
+#endif
 
 struct ttm_object_file {
 	struct ttm_object_device *tdev;
@@ -239,7 +245,11 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
 	int ret;
 
+#ifdef __NetBSD__
+    spin_lock(&tfile->lock);
+#else
 	rcu_read_lock();
+#endif
 	ret = drm_ht_find_item_rcu(ht, key, &hash);
 
 	if (likely(ret == 0)) {
@@ -247,7 +257,11 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 		if (!kref_get_unless_zero(&base->refcount))
 			base = NULL;
 	}
+#ifdef __NetBSD__
+    spin_unlock(&tfile->lock);
+#else
 	rcu_read_unlock();
+#endif
 
 	return base;
 }
@@ -261,7 +275,11 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 	struct drm_open_hash *ht = &tdev->object_hash;
 	int ret;
 
+#ifdef __NetBSD__
+    spin_lock(&tdev->object_lock);
+#else
 	rcu_read_lock();
+#endif
 	ret = drm_ht_find_item_rcu(ht, key, &hash);
 
 	if (likely(ret == 0)) {
@@ -269,7 +287,11 @@ ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
 		if (!kref_get_unless_zero(&base->refcount))
 			base = NULL;
 	}
+#ifdef __NetBSD__
+    spin_unlock(&tdev->object_lock);
+#else
 	rcu_read_unlock();
+#endif
 
 	return base;
 }
@@ -292,7 +314,11 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 	struct drm_hash_item *hash;
 	struct ttm_ref_object *ref;
 
+#ifdef __NetBSD__
+    spin_lock(&tfile->lock);
+#else
 	rcu_read_lock();
+#endif
 	if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
 		goto out_false;
 
@@ -308,15 +334,27 @@ bool ttm_ref_object_exists(struct ttm_object_file *tfile,
 	/*
 	 * Verify that the ref->obj pointer was actually valid!
 	 */
+#ifdef __NetBSD__
+	if (unlikely(ref->kref.kr_count == 0))
+#else
 	rmb();
 	if (unlikely(atomic_read(&ref->kref.refcount) == 0))
+#endif
 		goto out_false;
 
+#ifdef __NetBSD__
+    spin_unlock(&tfile->lock);
+#else
 	rcu_read_unlock();
+#endif
 	return true;
 
  out_false:
+#ifdef __NetBSD__
+    spin_unlock(&tfile->lock);
+#else
 	rcu_read_unlock();
+#endif
 	return false;
 }
 EXPORT_SYMBOL(ttm_ref_object_exists);
@@ -339,18 +377,30 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		*existed = true;
 
 	while (ret == -EINVAL) {
+#ifdef __NetBSD__
+        spin_lock(&tfile->lock);
+#else
 		rcu_read_lock();
+#endif
 		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
 			if (kref_get_unless_zero(&ref->kref)) {
+#ifdef __NetBSD__
+                spin_unlock(&tfile->lock);
+#else
 				rcu_read_unlock();
+#endif
 				break;
 			}
 		}
 
+#ifdef __NetBSD__
+        spin_unlock(&tfile->lock);
+#else
 		rcu_read_unlock();
+#endif
 		if (require_existed)
 			return -EPERM;
 
@@ -405,15 +455,21 @@ static void ttm_ref_object_release(struct kref *kref)
 	ht = &tfile->ref_hash[ref->ref_type];
 	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
 	list_del(&ref->head);
+#ifndef __NetBSD__
 	spin_unlock(&tfile->lock);
+#endif
 
 	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
 		base->ref_obj_release(base, ref->ref_type);
 
 	ttm_base_object_unref(&ref->obj);
 	ttm_mem_global_free(mem_glob, sizeof(*ref));
+#ifdef __NetBSD__
+    kfree(ref);
+#else
 	kfree_rcu(ref, rcu_head);
 	spin_lock(&tfile->lock);
+#endif
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -461,8 +517,8 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	for (i = 0; i < TTM_REF_NUM; ++i)
 		drm_ht_remove(&tfile->ref_hash[i]);
 
-	spin_unlock(&tfile->lock);
 	ttm_object_file_unref(&tfile);
+	spin_unlock(&tfile->lock);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
 
@@ -522,8 +578,13 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
 	tdev->ops = *ops;
 	tdev->dmabuf_release = tdev->ops.release;
 	tdev->ops.release = ttm_prime_dmabuf_release;
+#ifdef __NetBSD__
+    /* this is only for memory usage accounting ? */
+	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf));
+#else
 	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
 		ttm_round_pot(sizeof(struct file));
+#endif
 	return tdev;
 
 out_no_object_hash:
@@ -561,7 +622,18 @@ EXPORT_SYMBOL(ttm_object_device_release);
  */
 static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
 {
+#ifdef __NetBSD__
+	/*
+	 * TODO: this is not quite the same as the Linux version;
+	 * is it even possible for db_refcnt to be zero here?
+	 */
+	unsigned refcnt __diagused;
+	refcnt = atomic_inc_uint_nv(&dmabuf->db_refcnt);
+	KASSERT(refcnt > 1);
+	return true;
+#else
 	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+#endif
 }
 
 /**
@@ -582,7 +654,11 @@ static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
 	*p_base = NULL;
 	prime = container_of(base, struct ttm_prime_object, base);
 	BUG_ON(prime->dma_buf != NULL);
+#ifdef __NetBSD__
+	linux_mutex_destroy(&prime->mutex);
+#else
 	mutex_destroy(&prime->mutex);
+#endif
 	if (prime->refcount_release)
 		prime->refcount_release(&base);
 }
@@ -761,7 +837,11 @@ int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
 			  void (*ref_obj_release) (struct ttm_base_object *,
 						   enum ttm_ref_type ref_type))
 {
+#ifdef __NetBSD__
+	linux_mutex_init(&prime->mutex);
+#else
 	mutex_init(&prime->mutex);
+#endif
 	prime->size = PAGE_ALIGN(size);
 	prime->real_type = type;
 	prime->dma_buf = NULL;
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/svga_reg.h b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/svga_reg.h
index 4d60ef6c..933d1ba9 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/svga_reg.h
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/svga_reg.h
@@ -33,7 +33,9 @@
 
 #ifndef _SVGA_REG_H_
 #define _SVGA_REG_H_
+#ifndef __NetBSD__
 #include <linux/pci_ids.h>
+#endif  /* __NetBSD__ */
 
 #define INCLUDE_ALLOW_MODULE
 #define INCLUDE_ALLOW_USERLEVEL
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/vmware_pack_begin.h b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/vmware_pack_begin.h
index fa23d83f..1920f74f 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/vmware_pack_begin.h
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/device_include/vmware_pack_begin.h
@@ -24,4 +24,6 @@
  * SOFTWARE.
  *
  **********************************************************/
+#ifndef __NetBSD__
 #include <linux/compiler.h>
+#endif
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_buffer.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_buffer.c
index 14827974..5d12f932 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_buffer.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_buffer.c
@@ -34,6 +34,10 @@ __KERNEL_RCSID(0, "$NetBSD: vmwgfx_buffer.c,v 1.2 2018/08/27 04:58:37 riastradh
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_page_alloc.h>
+#ifdef __NetBSD__
+#include <drm/bus_dma_hacks.h>
+#include <sys/file.h>
+#endif
 
 static struct ttm_place vram_placement_flags = {
 	.fpfn = 0,
@@ -224,7 +228,9 @@ struct vmw_ttm_tt {
 	int gmr_id;
 	struct vmw_mob *mob;
 	int mem_type;
+#ifndef __NetBSD__
 	struct sg_table sgt;
+#endif
 	struct vmw_sg_table vsgt;
 	uint64_t sg_alloc_size;
 	bool mapped;
@@ -246,11 +252,12 @@ static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
 	return ++(viter->i) < viter->num_pages;
 }
 
+#ifndef __NetBSD__
 static bool __vmw_piter_sg_next(struct vmw_piter *viter)
 {
 	return __sg_page_iter_next(&viter->iter);
 }
-
+#endif
 
 /**
  * Helper functions to return a pointer to the current page.
@@ -266,10 +273,12 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
 	return viter->pages[viter->i];
 }
 
+#ifndef __NetBSD__
 static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
 {
 	return sg_page_iter_page(&viter->iter);
 }
+#endif
 
 
 /**
@@ -288,13 +297,19 @@ static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
 
 static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
 {
+#ifdef __NetBSD__
+    return viter->addrs->dm_segs[viter->i].ds_addr;
+#else
 	return viter->addrs[viter->i];
+#endif
 }
 
+#ifndef __NetBSD__
 static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
 {
 	return sg_page_iter_dma_address(&viter->iter);
 }
+#endif
 
 
 /**
@@ -320,12 +335,17 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 		viter->pages = vsgt->pages;
 		break;
 	case vmw_dma_alloc_coherent:
+#ifdef __NetBSD__
+	case vmw_dma_map_populate:
+	case vmw_dma_map_bind:
+#endif
 		viter->next = &__vmw_piter_non_sg_next;
 		viter->dma_address = &__vmw_piter_dma_addr;
 		viter->page = &__vmw_piter_non_sg_page;
 		viter->addrs = vsgt->addrs;
 		viter->pages = vsgt->pages;
 		break;
+#ifndef __NetBSD__
 	case vmw_dma_map_populate:
 	case vmw_dma_map_bind:
 		viter->next = &__vmw_piter_sg_next;
@@ -334,6 +354,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
 				     vsgt->sgt->orig_nents, p_offset);
 		break;
+#endif
 	default:
 		BUG();
 	}
@@ -349,11 +370,16 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
  */
 static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
 {
+#ifdef __NetBSD__
+
+    bus_dmamap_unload(vmw_tt->dev_priv->dev->dmat, vmw_tt->dma_ttm.dma_address);
+#else
 	struct device *dev = vmw_tt->dev_priv->dev->dev;
 
 	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
 		DMA_BIDIRECTIONAL);
 	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+#endif
 }
 
 /**
@@ -371,6 +397,13 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
  */
 static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 {
+#ifdef __NetBSD__
+
+    return -bus_dmamap_load_pglist(vmw_tt->dev_priv->dev->dmat,
+            vmw_tt->dma_ttm.dma_address, &vmw_tt->dma_ttm.ttm.pglist, 
+            vmw_tt->dma_ttm.ttm.num_pages << PAGE_SHIFT,
+            BUS_DMA_WAITOK);
+#else
 	struct device *dev = vmw_tt->dev_priv->dev->dev;
 	int ret;
 
@@ -382,6 +415,7 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 	vmw_tt->sgt.nents = ret;
 
 	return 0;
+#endif
 }
 
 /**
@@ -397,13 +431,19 @@ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
 static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 {
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+#ifndef __NetBSD__
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+#endif
 	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+#ifndef __NetBSD__
 	struct vmw_piter iter;
 	dma_addr_t old;
+#endif
 	int ret = 0;
+#ifndef __NetBSD__
 	static size_t sgl_size;
 	static size_t sgt_size;
+#endif
 
 	if (vmw_tt->mapped)
 		return 0;
@@ -412,11 +452,14 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
 	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
 	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+#ifndef __NetBSD__
 	vsgt->sgt = &vmw_tt->sgt;
+#endif
 
 	switch (dev_priv->map_mode) {
 	case vmw_dma_map_bind:
 	case vmw_dma_map_populate:
+#ifndef __NetBSD__
 		if (unlikely(!sgl_size)) {
 			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
 			sgt_size = ttm_round_pot(sizeof(struct sg_table));
@@ -443,7 +486,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 			ttm_mem_global_free(glob, over_alloc);
 			vmw_tt->sg_alloc_size -= over_alloc;
 		}
-
+#endif
 		ret = vmw_ttm_map_for_dma(vmw_tt);
 		if (unlikely(ret != 0))
 			goto out_map_fail;
@@ -453,6 +496,9 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		break;
 	}
 
+#ifdef __NetBSD__
+    vmw_tt->vsgt.num_regions = vmw_tt->dma_ttm.dma_address->dm_nsegs;
+#else
 	old = ~((dma_addr_t) 0);
 	vmw_tt->vsgt.num_regions = 0;
 	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
@@ -462,15 +508,17 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 			vmw_tt->vsgt.num_regions++;
 		old = cur;
 	}
-
+#endif
 	vmw_tt->mapped = true;
 	return 0;
 
 out_map_fail:
+#ifndef __NetBSD__
 	sg_free_table(vmw_tt->vsgt.sgt);
 	vmw_tt->vsgt.sgt = NULL;
 out_sg_alloc_fail:
 	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+#endif
 	return ret;
 }
 
@@ -487,17 +535,25 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 {
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 
+#ifdef __NetBSD__
+	if (!vmw_tt->vsgt.num_regions)
+#else
 	if (!vmw_tt->vsgt.sgt)
+#endif
 		return;
 
 	switch (dev_priv->map_mode) {
 	case vmw_dma_map_bind:
 	case vmw_dma_map_populate:
 		vmw_ttm_unmap_from_dma(vmw_tt);
+#ifdef __NetBSD__
+        vmw_tt->vsgt.num_regions = 0;
+#else
 		sg_free_table(vmw_tt->vsgt.sgt);
 		vmw_tt->vsgt.sgt = NULL;
 		ttm_mem_global_free(vmw_mem_glob(dev_priv),
 				    vmw_tt->sg_alloc_size);
+#endif
 		break;
 	default:
 		break;
@@ -641,13 +697,18 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+#ifndef __NetBSD__
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 	int ret;
+#endif
 
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
+#ifdef __NetBSD__
+    return ttm_bus_dma_populate(&vmw_tt->dma_ttm);
+#else
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
 		size_t size =
 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
@@ -662,14 +723,17 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
 		ret = ttm_pool_populate(ttm);
 
 	return ret;
+#endif
 }
 
 static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
 						 dma_ttm.ttm);
+#ifndef __NetBSD__
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+#endif
 
 
 	if (vmw_tt->mob) {
@@ -678,6 +742,9 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
 	}
 
 	vmw_ttm_unmap_dma(vmw_tt);
+#ifdef __NetBSD__
+    ttm_bus_dma_unpopulate(&vmw_tt->dma_ttm);
+#else
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
 		size_t size =
 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
@@ -686,6 +753,7 @@ static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
 		ttm_mem_global_free(glob, size);
 	} else
 		ttm_pool_unpopulate(ttm);
+#endif
 }
 
 static struct ttm_backend_func vmw_ttm_func = {
@@ -777,7 +845,11 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo,
 static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct ttm_object_file *tfile =
+#ifdef __NetBSD__
+		vmw_fpriv((struct drm_file *)filp->f_data)->tfile;
+#else
 		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+#endif
 
 	return vmw_user_dmabuf_verify_access(bo, tfile);
 }
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c
index e01241c2..16e7e8b6 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -38,8 +38,104 @@ __KERNEL_RCSID(0, "$NetBSD: vmwgfx_cmdbuf.c,v 1.2 2018/08/27 04:58:37 riastradh
  * multiple of the DMA pool allocation size.
  */
 #define VMW_CMDBUF_INLINE_ALIGN 64
+#ifdef __NetBSD__
+#define VMW_CMDBUF_INLINE_SIZE \
+	(1024 - round_up(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
+#else
 #define VMW_CMDBUF_INLINE_SIZE \
 	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
+#endif
+
+#ifdef __NetBSD__
+
+/*
+ * XXX: possibly not correct?
+ * The affected locks are taken from softint context, but they may be held
+ * for a "long" time by thread context, e.g. vmw_cmdbuf_man_idle.
+ * However, _bh apparently blocks the whole softirq machinery, so is it any better?
+ */
+
+#define spin_lock_bh spin_lock
+#define spin_unlock_bh spin_unlock
+
+#endif
+
+#ifdef __NetBSD__
+
+/* XXX simple dma_pool implementation */
+
+struct dma_pool {
+    bus_dma_tag_t dmat;
+    size_t alloc_size;
+    size_t align;
+};
+
+/* XXX */
+struct vmwgfx_softc {
+    device_t sc_dev;
+    struct pci_dev sc_pci_dev;
+    struct drm_device *sc_drm_dev;
+};
+
+static struct dma_pool *
+dma_pool_create(const char *name, struct drm_device *device, size_t alloc_size,
+        size_t align, size_t allocation)
+{
+    struct dma_pool *pool = kmem_alloc(sizeof(struct dma_pool), KM_NOSLEEP);
+    if (pool == NULL) {
+        return NULL;
+    }
+
+    pool->dmat = device->bus_dmat;
+    pool->alloc_size = alloc_size;
+    pool->align = align;
+
+    return pool;
+}
+
+static void
+dma_pool_destroy(struct dma_pool *pool)
+{
+
+    kmem_free(pool, sizeof(struct dma_pool));
+}
+
+static void *
+dma_pool_alloc(struct dma_pool *pool, int mem_flags, dma_addr_t *handle)
+{
+    bus_dma_segment_t seg;
+    int nseg, ret;
+    void *vaddr;
+
+    ret = bus_dmamem_alloc(pool->dmat, pool->alloc_size, pool->align, PAGE_SIZE,
+            &seg, 1, &nseg, BUS_DMA_NOWAIT);
+    if (ret)
+        return NULL;
+
+    ret = bus_dmamem_map(pool->dmat, &seg, 1, pool->alloc_size, &vaddr,
+            BUS_DMA_NOWAIT);
+    if (ret) {
+        bus_dmamem_free(pool->dmat, &seg, 1);
+        return NULL;
+    }
+
+    *handle = seg.ds_addr;
+    return vaddr;
+}
+
+static void
+dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t handle)
+{
+    bus_dma_segment_t seg;
+
+    seg.ds_addr = handle;
+    seg.ds_len = pool->alloc_size;
+
+    bus_dmamem_unmap(pool->dmat, vaddr, pool->alloc_size);
+    bus_dmamem_free(pool->dmat, &seg, 1);
+}
+
+#endif
 
 /**
  * struct vmw_cmdbuf_context - Command buffer context queues
@@ -121,7 +217,11 @@ struct vmw_cmdbuf_man {
 	spinlock_t lock;
 	struct dma_pool *headers;
 	struct dma_pool *dheaders;
+#ifdef __NetBSD__
+    void *tasklet;
+#else
 	struct tasklet_struct tasklet;
+#endif
 	wait_queue_head_t alloc_queue;
 	wait_queue_head_t idle_queue;
 	bool irq_on;
@@ -252,7 +352,9 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 {
 	struct vmw_cmdbuf_man *man = header->man;
 
+#ifndef __NetBSD__
 	lockdep_assert_held_once(&man->lock);
+#endif
 
 	if (header->inline_space) {
 		vmw_cmdbuf_header_inline_free(header);
@@ -482,7 +584,11 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
  * command buffer processor to free finished buffers and submit any
  * queued buffers to hardware.
  */
+#ifdef __NetBSD__
+static void vmw_cmdbuf_man_tasklet(void *data)
+#else
 static void vmw_cmdbuf_man_tasklet(unsigned long data)
+#endif
 {
 	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 
@@ -540,7 +646,9 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 	bool idle = false;
 	int i;
 
+#ifndef __NetBSD__  /* lock is taken in vmw_cmdbuf_idle */
 	spin_lock_bh(&man->lock);
+#endif
 	vmw_cmdbuf_man_process(man);
 	for_each_cmdbuf_ctx(man, i, ctx) {
 		if (!list_empty(&ctx->submitted) ||
@@ -552,7 +660,9 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 	idle = list_empty(&man->error);
 
 out_unlock:
+#ifndef __NetBSD__
 	spin_unlock_bh(&man->lock);
+#endif
 
 	return idle;
 }
@@ -634,6 +744,29 @@ int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 			       SVGA_IRQFLAG_COMMAND_BUFFER,
 			       &man->dev_priv->cmdbuf_waiters);
 
+#ifdef __NetBSD__
+    spin_lock(&man->lock);
+    while (!vmw_cmdbuf_man_idle(man, true)) {
+        if (interruptible) {
+            ret = cv_timedwait_sig(&man->idle_queue, &man->lock.sl_lock, timeout);
+            if (ret) {
+                if (ret == EWOULDBLOCK) {
+                    ret = 0;
+                } else { 
+                    ret = -ERESTARTSYS;
+                }
+                break;
+            }
+        } else {
+            ret = cv_timedwait(&man->idle_queue, &man->lock.sl_lock, timeout);
+            if (ret) {
+                ret = 0;
+                break;
+            }
+        }
+        /* TODO: reduce timeout if we loop */
+    }
+#else
 	if (interruptible) {
 		ret = wait_event_interruptible_timeout
 			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
@@ -643,6 +776,7 @@ int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 			 timeout);
 	}
+#endif
 	vmw_generic_waiter_remove(man->dev_priv,
 				  SVGA_IRQFLAG_COMMAND_BUFFER,
 				  &man->dev_priv->cmdbuf_waiters);
@@ -652,6 +786,9 @@ int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 		else
 			ret = 0;
 	}
+#ifdef __NetBSD__
+    spin_unlock(&man->lock);
+#endif
 	if (ret > 0)
 		ret = 0;
 
@@ -738,6 +875,22 @@ static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 			       SVGA_IRQFLAG_COMMAND_BUFFER,
 			       &man->dev_priv->cmdbuf_waiters);
 
+#ifdef __NetBSD__
+    while (!vmw_cmdbuf_try_alloc(man, &info)) {
+        if (interruptible) {
+            int ret = cv_wait_sig(&man->alloc_queue, &man->space_mutex.mtx_lock);
+            if (ret) {
+    			vmw_generic_waiter_remove
+    				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
+    				 &man->dev_priv->cmdbuf_waiters);
+    			mutex_unlock(&man->space_mutex);
+                return -ERESTARTSYS;
+            }
+        } else {
+            cv_wait(&man->alloc_queue, &man->space_mutex.mtx_lock);
+        }
+    }
+#else
 	if (interruptible) {
 		int ret;
 
@@ -753,6 +906,7 @@ static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 	} else {
 		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 	}
+#endif
 	vmw_generic_waiter_remove(man->dev_priv,
 				  SVGA_IRQFLAG_COMMAND_BUFFER,
 				  &man->dev_priv->cmdbuf_waiters);
@@ -1045,7 +1199,11 @@ void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
 	if (!man)
 		return;
 
+#ifdef __NetBSD__
+    softint_schedule(man->tasklet);
+#else
 	tasklet_schedule(&man->tasklet);
+#endif
 }
 
 /**
@@ -1130,14 +1288,35 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 	struct vmw_private *dev_priv = man->dev_priv;
 	bool dummy;
 	int ret;
+#ifdef __NetBSD__
+    bus_dma_segment_t dmaseg;
+    int ndmaseg;
+#endif
 
 	if (man->has_pool)
 		return -EINVAL;
 
 	/* First, try to allocate a huge chunk of DMA memory */
 	size = PAGE_ALIGN(size);
+#ifdef __NetBSD__
+    ret = bus_dmamem_alloc(dev_priv->dev->bus_dmat, size, 0, 0, &dmaseg, 1,
+            &ndmaseg, BUS_DMA_NOWAIT /* XXX ? */);
+    if (ret == 0) {
+        ret = bus_dmamem_map(dev_priv->dev->bus_dmat, &dmaseg, 1, size,
+                (void *)&man->map, BUS_DMA_COHERENT | BUS_DMA_NOWAIT);
+        if (ret == 0) {
+            man->handle = dmaseg.ds_addr;
+        } else {
+            bus_dmamem_free(dev_priv->dev->bus_dmat, &dmaseg, 1);
+            man->map = NULL;
+        }
+    } else {
+        man->map = NULL;
+    }
+#else
 	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
 				      &man->handle, GFP_KERNEL);
+#endif
 	if (man->map) {
 		man->using_mob = false;
 	} else {
@@ -1214,7 +1393,11 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 		return ERR_PTR(-ENOMEM);
 
 	man->headers = dma_pool_create("vmwgfx cmdbuf",
+#ifdef __NetBSD__
+				       dev_priv->dev,
+#else
 				       &dev_priv->dev->pdev->dev,
+#endif
 				       sizeof(SVGACBHeader),
 				       64, PAGE_SIZE);
 	if (!man->headers) {
@@ -1223,7 +1406,11 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 	}
 
 	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
-					&dev_priv->dev->pdev->dev,
+#ifdef __NetBSD__
+				    dev_priv->dev,
+#else
+				    &dev_priv->dev->pdev->dev,
+#endif
 					sizeof(struct vmw_cmdbuf_dheader),
 					64, PAGE_SIZE);
 	if (!man->dheaders) {
@@ -1236,13 +1423,33 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
 
 	INIT_LIST_HEAD(&man->error);
 	spin_lock_init(&man->lock);
+#ifdef __NetBSD__
+	linux_mutex_init(&man->cur_mutex);
+	linux_mutex_init(&man->space_mutex);
+#else
 	mutex_init(&man->cur_mutex);
 	mutex_init(&man->space_mutex);
+#endif
+#ifdef __NetBSD__
+    man->tasklet = softint_establish(SOFTINT_BIO | SOFTINT_MPSAFE,
+            vmw_cmdbuf_man_tasklet, man);
+    if (man->tasklet == NULL) {
+        DRM_ERROR("vmwgfx: could not establish softint\n");
+        vmw_cmdbuf_man_destroy(man);
+        return ERR_PTR(-ENOMEM);
+    }
+#else
 	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
 		     (unsigned long) man);
+#endif
 	man->default_size = VMW_CMDBUF_INLINE_SIZE;
+#ifdef __NetBSD__
+    cv_init(&man->alloc_queue, "vmwgfx_cmd_alloc");
+    cv_init(&man->idle_queue, "vmwgfx_cmd_idle");
+#else
 	init_waitqueue_head(&man->alloc_queue);
 	init_waitqueue_head(&man->idle_queue);
+#endif
 	man->dev_priv = dev_priv;
 	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
 	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
@@ -1288,8 +1495,18 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
 		(void) ttm_bo_kunmap(&man->map_obj);
 		ttm_bo_unref(&man->cmd_space);
 	} else {
+#ifdef __NetBSD__
+        bus_dma_segment_t seg;
+
+        seg.ds_addr = man->handle;
+        seg.ds_len = man->size;
+        bus_dmamem_unmap(man->dev_priv->dev->bus_dmat, (void *)man->map,
+                man->size);
+        bus_dmamem_free(man->dev_priv->dev->bus_dmat, &seg, 1);
+#else
 		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
 				  man->size, man->map, man->handle);
+#endif
 	}
 }
 
@@ -1309,11 +1526,23 @@ void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
 
 	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
 				  &man->dev_priv->error_waiters);
+#ifdef __NetBSD__
+    if (man->tasklet != NULL) {
+        softint_disestablish(man->tasklet);
+        man->tasklet = NULL;
+    }
+#else
 	tasklet_kill(&man->tasklet);
+#endif
 	(void) cancel_work_sync(&man->work);
 	dma_pool_destroy(man->dheaders);
 	dma_pool_destroy(man->headers);
+#ifdef __NetBSD__
+	linux_mutex_destroy(&man->cur_mutex);
+	linux_mutex_destroy(&man->space_mutex);
+#else
 	mutex_destroy(&man->cur_mutex);
 	mutex_destroy(&man->space_mutex);
+#endif
 	kfree(man);
 }
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 29c94a53..f8e5d15d 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -166,7 +166,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
 void vmw_cmdbuf_res_revert(struct list_head *list)
 {
 	struct vmw_cmdbuf_res *entry, *next;
-	int ret;
 
 	list_for_each_entry_safe(entry, next, list, head) {
 		switch (entry->state) {
@@ -174,7 +173,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
 			vmw_cmdbuf_res_free(entry->man, entry);
 			break;
 		case VMW_CMDBUF_RES_DEL:
-			ret = drm_ht_insert_item(&entry->man->resources,
+			drm_ht_insert_item(&entry->man->resources,
 						 &entry->hash);
 			list_del(&entry->head);
 			list_add_tail(&entry->head, &entry->man->list);
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_context.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_context.c
index 47d7cd7b..49ef52e0 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_context.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_context.c
@@ -711,7 +711,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
 
 	(void) vmw_context_bind_dx_query(res, NULL);
 
+#ifdef __NetBSD__
+    kfree(ctx);
+#else
 	ttm_base_object_kfree(ctx, base);
+#endif
 	ttm_mem_global_free(vmw_mem_glob(dev_priv),
 			    vmw_user_context_size);
 }
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_dmabuf.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_dmabuf.c
index 9d8f66a3..27189ea7 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -294,8 +294,13 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 	struct ttm_place pl;
 	struct ttm_placement placement;
 	struct ttm_buffer_object *bo = &vbo->base;
+#ifdef __NetBSD__
+	int ret __diagused;
+	uint32_t old_mem_type __diagused = bo->mem.mem_type;
+#else
 	uint32_t old_mem_type = bo->mem.mem_type;
 	int ret;
+#endif
 
 	lockdep_assert_held(&bo->resv->lock.base);
 
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.c
index 47a1ac0d..906d2990 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.c
@@ -40,6 +40,9 @@ __KERNEL_RCSID(0, "$NetBSD: vmwgfx_drv.c,v 1.3 2018/08/27 07:03:26 riastradh Exp
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_module.h>
 #include <linux/dma_remapping.h>
+#ifdef __NetBSD__
+#include <sys/file.h>
+#endif
 
 #define VMWGFX_DRIVER_NAME "vmwgfx"
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
@@ -144,7 +147,7 @@ __KERNEL_RCSID(0, "$NetBSD: vmwgfx_drv.c,v 1.3 2018/08/27 07:03:26 riastradh Exp
  */
 
 #define VMW_IOCTL_DEF(ioctl, func, flags) \
-  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
+  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func, NULL}
 
 /**
  * Ioctl definitions.
@@ -221,26 +224,36 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_AUTH | DRM_RENDER_ALLOW),
 };
 
+#ifndef __NetBSD__
 static struct pci_device_id vmw_pci_id_list[] = {
 	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
 	{0, 0, 0}
 };
 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
+#endif
 
+#ifndef __NetBSD__
 static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
+#endif
 static int vmw_force_iommu;
 static int vmw_restrict_iommu;
 static int vmw_force_coherent;
 static int vmw_restrict_dma_mask;
 static int vmw_assume_16bpp;
 
+#ifndef __NetBSD__
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
+#endif
 static void vmw_master_init(struct vmw_master *);
+#ifndef __NetBSD__
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 			      void *ptr);
+#endif
 
+#ifndef __NetBSD__
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
 module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+#endif
 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
 module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
@@ -535,6 +548,9 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+#ifdef __NetBSD__
+	dev_priv->map_mode = vmw_dma_alloc_coherent;
+#else
 #ifdef CONFIG_X86
 	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
@@ -581,6 +597,7 @@ out_fixup:
 #else /* CONFIG_X86 */
 	dev_priv->map_mode = vmw_dma_map_populate;
 #endif /* CONFIG_X86 */
+#endif
 
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
@@ -633,9 +650,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
 	dev_priv->last_read_seqno = (uint32_t) -100;
+#ifdef __NetBSD__
+	linux_mutex_init(&dev_priv->cmdbuf_mutex);
+	linux_mutex_init(&dev_priv->release_mutex);
+	linux_mutex_init(&dev_priv->binding_mutex);
+#else
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
+#endif
 	rwlock_init(&dev_priv->resource_lock);
 	ttm_lock_init(&dev_priv->reservation_sem);
 	spin_lock_init(&dev_priv->hw_lock);
@@ -648,9 +671,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
 	}
 
+#ifdef __NetBSD__
+	linux_mutex_init(&dev_priv->init_mutex);
+    cv_init(&dev_priv->fence_queue, "vmwgfx fence");
+    cv_init(&dev_priv->fifo_queue, "vmwgfx fifo");
+#else
 	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
+#endif
 	dev_priv->fence_queue_waiters = 0;
 	dev_priv->fifo_queue_waiters = 0;
 
@@ -659,10 +688,24 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
 	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+#ifdef __NetBSD__
+    ret = bus_space_map(dev->pdev->pd_pa.pa_iot,
+            dev->pdev->pd_resources[0].addr,
+            dev->pdev->pd_resources[0].size, 0,
+            &dev_priv->io_bsh);
+    if (ret != 0) {
+        ret = -ENXIO;
+        goto out_err0;
+    }
+#endif
 
 	dev_priv->assume_16bpp = !!vmw_assume_16bpp;
 
+#ifdef __NetBSD__
+	dev_priv->enable_fb = true;
+#else
 	dev_priv->enable_fb = enable_fbdev;
+#endif
 
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
@@ -770,8 +813,21 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
+#ifdef __NetBSD__
+    ret = bus_space_map(dev_priv->dev->bst, dev_priv->mmio_start,
+            dev_priv->mmio_size,
+            BUS_SPACE_MAP_LINEAR, &dev_priv->mmio_bsh);
+    if (ret != 0) {
+        ret = -ENXIO;
+        printf("vmwgfx: could not map mmio\n");
+        goto out_err0;
+    }
+    dev_priv->mmio_virt = bus_space_vaddr(dev_priv->dev->bst,
+            dev_priv->mmio_bsh);
+#else
 	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
 				       dev_priv->mmio_size, MEMREMAP_WB);
+#endif
 
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
@@ -799,6 +855,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev->dev_private = dev_priv;
 
+#ifndef __NetBSD__
 	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
 	dev_priv->stealth = (ret != 0);
 	if (dev_priv->stealth) {
@@ -814,6 +871,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 			goto out_no_device;
 		}
 	}
+#endif
 
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
 #ifdef __NetBSD__
@@ -836,7 +894,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ret = ttm_bo_device_init(&dev_priv->bdev,
 				 dev_priv->bo_global_ref.ref.object,
 				 &vmw_bo_driver,
+#ifdef __NetBSD__
+                dev->bst,
+                dev->dmat,
+#else
 				 dev->anon_inode->i_mapping,
+#endif
 				 VMWGFX_FILE_PAGE_OFFSET,
 				 false);
 	if (unlikely(ret != 0)) {
@@ -900,8 +963,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		vmw_fb_init(dev_priv);
 	}
 
+#ifndef __NetBSD__
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
+#endif
 
 	return 0;
 
@@ -922,14 +987,18 @@ out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 out_no_irq:
+#ifndef __NetBSD__
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
 		pci_release_regions(dev->pdev);
 out_no_device:
+#endif
 	ttm_object_device_release(&dev_priv->tdev);
 out_err4:
+#ifndef __NetBSD__
 	memunmap(dev_priv->mmio_virt);
+#endif
 out_err3:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
@@ -947,13 +1016,17 @@ static int vmw_driver_unload(struct drm_device *dev)
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	enum vmw_res_type i;
 
+#ifndef __NetBSD__
 	unregister_pm_notifier(&dev_priv->pm_nb);
+#endif
 
 	if (dev_priv->ctx.res_ht_initialized)
 		drm_ht_remove(&dev_priv->ctx.res_ht);
 	vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->enable_fb) {
+#ifndef __NetBSD__
 		vmw_fb_off(dev_priv);
+#endif
 		vmw_fb_close(dev_priv);
 		vmw_fifo_resource_dec(dev_priv);
 		vmw_svga_disable(dev_priv);
@@ -974,13 +1047,16 @@ static int vmw_driver_unload(struct drm_device *dev)
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
+#ifndef __NetBSD__
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
 		pci_release_regions(dev->pdev);
-
+#endif
 	ttm_object_device_release(&dev_priv->tdev);
+#ifndef __NetBSD__
 	memunmap(dev_priv->mmio_virt);
+#endif
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
@@ -1096,12 +1172,23 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
 	return vmaster;
 }
 
+#ifdef __NetBSD__
+static int vmw_generic_ioctl(struct file *filp, unsigned long cmd,
+			      void *arg,
+			      int (*ioctl_func)(struct file *, unsigned long,
+						 void *))
+#else
 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg,
 			      long (*ioctl_func)(struct file *, unsigned int,
 						 unsigned long))
+#endif
 {
+#ifdef __NetBSD__
+	struct drm_file *file_priv = filp->f_data;
+#else
 	struct drm_file *file_priv = filp->private_data;
+#endif
 	struct drm_device *dev = file_priv->minor->dev;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
 	struct vmw_master *vmaster;
@@ -1125,8 +1212,13 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
 				goto out_io_encoding;
 
+#ifdef __NetBSD__
+			return (long) vmw_execbuf_ioctl(dev, (unsigned long)arg, file_priv,
+							IOCPARM_LEN(cmd));
+#else
 			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
 							_IOC_SIZE(cmd));
+#endif
 		}
 
 		if (unlikely(ioctl->cmd != cmd))
@@ -1159,8 +1251,13 @@ out_io_encoding:
 	return -EINVAL;
 }
 
+#ifdef __NetBSD__
+static int vmw_unlocked_ioctl(struct file *filp, unsigned long cmd,
+			       void *arg)
+#else
 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 			       unsigned long arg)
+#endif
 {
 	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
 }
@@ -1270,8 +1367,11 @@ static void vmw_master_drop(struct drm_device *dev,
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
+#ifndef __NetBSD__
+    /* XXX */
 	if (dev_priv->enable_fb)
 		vmw_fb_on(dev_priv);
+#endif
 }
 
 /**
@@ -1302,6 +1402,7 @@ void vmw_svga_enable(struct vmw_private *dev_priv)
 	ttm_read_unlock(&dev_priv->reservation_sem);
 }
 
+#ifndef __NetBSD__
 /**
  * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
  *
@@ -1320,6 +1421,7 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
 	}
 	spin_unlock(&dev_priv->svga_lock);
 }
+#endif
 
 /**
  * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
@@ -1345,6 +1447,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
 	ttm_write_unlock(&dev_priv->reservation_sem);
 }
 
+#ifndef __NetBSD__
 static void vmw_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -1480,6 +1583,10 @@ static int vmw_pm_restore(struct device *kdev)
 	return 0;
 }
 
+#endif /* __NetBSD__ */
+
+#ifndef __NetBSD__
+
 static const struct dev_pm_ops vmw_pm_ops = {
 	.freeze = vmw_pm_freeze,
 	.thaw = vmw_pm_restore,
@@ -1502,6 +1609,8 @@ static const struct file_operations vmwgfx_driver_fops = {
 	.llseek = noop_llseek,
 };
 
+#endif /* __NetBSD__ */
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
 	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
@@ -1537,7 +1646,15 @@ static struct drm_driver driver = {
 	.prime_fd_to_handle = vmw_prime_fd_to_handle,
 	.prime_handle_to_fd = vmw_prime_handle_to_fd,
 
+#ifdef __NetBSD__
+    .fops = NULL,
+    .mmap_object = vmw_mmap,
+    .fops_pre_read = vmw_fops_ping_host,
+    .fops_pre_poll = vmw_fops_ping_host,
+    .ioctl_override = vmw_unlocked_ioctl,
+#else
 	.fops = &vmwgfx_driver_fops,
+#endif
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
 	.date = VMWGFX_DRIVER_DATE,
@@ -1546,6 +1663,11 @@ static struct drm_driver driver = {
 	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
 };
 
+#ifdef __NetBSD__
+struct drm_driver *const vmwgfx_drm_driver = &driver;
+#endif
+
+#ifndef __NetBSD__
 static struct pci_driver vmw_pci_driver = {
 	.name = VMWGFX_DRIVER_NAME,
 	.id_table = vmw_pci_id_list,
@@ -1583,11 +1705,14 @@ static void __exit vmwgfx_exit(void)
 
 module_init(vmwgfx_init);
 module_exit(vmwgfx_exit);
+#endif  /* __NetBSD__ */
 
 MODULE_AUTHOR("VMware Inc. and others");
 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
 MODULE_LICENSE("GPL and additional rights");
+#ifndef __NetBSD__
 MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
 	       __stringify(VMWGFX_DRIVER_MINOR) "."
 	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
 	       "0");
+#endif
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.h b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.h
index 1d8209fc..22a625c3 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.h
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.h
@@ -32,9 +32,11 @@
 
 #include "vmwgfx_reg.h"
 #include <drm/drmP.h>
+#include <drm/drm_legacy.h>
 #include <drm/vmwgfx_drm.h>
 #include <drm/drm_hashtab.h>
 #include <linux/suspend.h>
+#include <linux/rwsem.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_object.h>
 #include <drm/ttm/ttm_lock.h>
@@ -247,8 +249,12 @@ enum vmw_dma_map_mode {
 struct vmw_sg_table {
 	enum vmw_dma_map_mode mode;
 	struct page **pages;
+#ifdef __NetBSD__
+    bus_dmamap_t addrs;
+#else
 	const dma_addr_t *addrs;
 	struct sg_table *sgt;
+#endif
 	unsigned long num_regions;
 	unsigned long num_pages;
 };
@@ -269,8 +275,12 @@ struct vmw_sg_table {
  */
 struct vmw_piter {
 	struct page **pages;
+#ifdef __NetBSD__
+    bus_dmamap_t addrs;
+#else
 	const dma_addr_t *addrs;
 	struct sg_page_iter iter;
+#endif
 	unsigned long i;
 	unsigned long num_pages;
 	bool (*next)(struct vmw_piter *);
@@ -354,6 +364,10 @@ struct vmw_otable_batch {
 	struct ttm_buffer_object *otable_bo;
 };
 
+#ifdef __NetBSD__
+struct vmw_fb_par;
+#endif
+
 struct vmw_private {
 	struct ttm_bo_device bdev;
 	struct ttm_bo_global_ref bo_global_ref;
@@ -363,6 +377,9 @@ struct vmw_private {
 
 	struct drm_device *dev;
 	unsigned long vmw_chipset;
+#ifdef __NetBSD__
+    bus_space_handle_t io_bsh;
+#endif
 	unsigned int io_start;
 	uint32_t vram_start;
 	uint32_t vram_size;
@@ -378,6 +395,9 @@ struct vmw_private {
 	uint32_t initial_width;
 	uint32_t initial_height;
 	u32 *mmio_virt;
+#ifdef __NetBSD__
+    bus_space_handle_t mmio_bsh;
+#endif
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
@@ -408,7 +428,11 @@ struct vmw_private {
 	 * Framebuffer info.
 	 */
 
+#ifdef __NetBSD__
+    struct vmw_fb_par *fb_par;
+#else
 	void *fb_info;
+#endif
 	enum vmw_display_unit_type active_display_unit;
 	struct vmw_legacy_display *ldu_priv;
 	struct vmw_screen_object_display *sou_priv;
@@ -562,8 +586,15 @@ static inline void vmw_write(struct vmw_private *dev_priv,
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+#ifdef __NetBSD__
+    bus_space_write_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+            VMWGFX_INDEX_PORT, offset);
+    bus_space_write_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+            VMWGFX_VALUE_PORT, value);
+#else
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+#endif
 	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
@@ -574,8 +605,15 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 	u32 val;
 
 	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+#ifdef __NetBSD__
+    bus_space_write_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+            VMWGFX_INDEX_PORT, offset);
+    val = bus_space_read_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+            VMWGFX_VALUE_PORT);
+#else
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+#endif
 	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 
 	return val;
@@ -705,10 +743,12 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
 				      struct drm_file *file_priv);
+#ifndef __NetBSD__
 extern unsigned int vmw_fops_poll(struct file *filp,
 				  struct poll_table_struct *wait);
 extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
 			     size_t count, loff_t *offset);
+#endif
 
 /**
  * Fifo utilities - vmwgfx_fifo.c
@@ -740,7 +780,13 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
 
 extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
 extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
+#ifdef __NetBSD__
+extern int vmw_mmap(struct drm_device *, off_t, size_t, int,
+        struct uvm_object **, voff_t *, struct file *);
+extern void vmw_fops_ping_host(struct drm_device *);
+#else
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+#endif
 
 /**
  * TTM buffer object driver - vmwgfx_buffer.c
@@ -845,7 +891,11 @@ extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
  * IRQs and wating - vmwgfx_irq.c
  */
 
+#ifdef __NetBSD__
+extern irqreturn_t vmw_irq_handler(void *arg);
+#else
 extern irqreturn_t vmw_irq_handler(int irq, void *arg);
+#endif
 extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
 			  uint32_t seqno, bool interruptible,
 			  unsigned long timeout);
@@ -893,6 +943,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
 int vmw_fb_close(struct vmw_private *dev_priv);
 int vmw_fb_off(struct vmw_private *vmw_priv);
 int vmw_fb_on(struct vmw_private *vmw_priv);
+#ifdef __NetBSD__
+void vmw_fb_dirty(struct vmw_private *vmw_priv);
+#endif
 
 /**
  * Kernel modesetting - vmwgfx_kms.c
@@ -1220,7 +1273,11 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
  */
 static inline u32 vmw_mmio_read(u32 *addr)
 {
+#ifdef __NetBSD__
+    return *addr;
+#else
 	return READ_ONCE(*addr);
+#endif
 }
 
 /**
@@ -1233,6 +1290,10 @@ static inline u32 vmw_mmio_read(u32 *addr)
  */
 static inline void vmw_mmio_write(u32 value, u32 *addr)
 {
+#ifdef __NetBSD__
+    *addr = value;
+#else
 	WRITE_ONCE(*addr, value);
+#endif
 }
 #endif
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_execbuf.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_execbuf.c
index e798baa2..b2b6c30b 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_execbuf.c
@@ -737,7 +737,9 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
 			  (unsigned) *id_loc);
+#ifndef __NetBSD__
 		dump_stack();
+#endif
 		return ret;
 	}
 
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fb.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fb.c
index 5d4b61ac..7355edb1 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fb.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fb.c
@@ -39,12 +39,21 @@ __KERNEL_RCSID(0, "$NetBSD: vmwgfx_fb.c,v 1.2 2018/08/27 04:58:37 riastradh Exp
 
 #include <drm/ttm/ttm_placement.h>
 
+#ifdef __NetBSD__
+#include "vmwgfxfb.h"
+#endif
+
 #define VMW_DIRTY_DELAY (HZ / 30)
 
 struct vmw_fb_par {
+#ifdef __NetBSD__
+    struct drm_fb_helper helper;
+#endif
 	struct vmw_private *vmw_priv;
 
+#ifndef __NetBSD__
 	void *vmalloc;
+#endif
 
 	struct mutex bo_mutex;
 	struct vmw_dma_buffer *vmw_bo;
@@ -53,8 +62,10 @@ struct vmw_fb_par {
 	unsigned bo_size;
 	struct drm_framebuffer *set_fb;
 	struct drm_display_mode *set_mode;
+#ifndef __NetBSD__
 	u32 fb_x;
 	u32 fb_y;
+#endif
 	bool bo_iowrite;
 
 	u32 pseudo_palette[17];
@@ -62,6 +73,7 @@ struct vmw_fb_par {
 	unsigned max_width;
 	unsigned max_height;
 
+#ifndef __NetBSD__
 	struct {
 		spinlock_t lock;
 		bool active;
@@ -70,12 +82,16 @@ struct vmw_fb_par {
 		unsigned x2;
 		unsigned y2;
 	} dirty;
+#endif
 
 	struct drm_crtc *crtc;
 	struct drm_connector *con;
+#ifndef __NetBSD__
 	struct delayed_work local_work;
+#endif
 };
 
+#ifndef __NetBSD__
 static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
 			    unsigned blue, unsigned transp,
 			    struct fb_info *info)
@@ -372,6 +388,8 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
 			  image->width, image->height);
 }
 
+#endif
+
 /*
  * Bring up code
  */
@@ -407,6 +425,7 @@ err_unlock:
 	return ret;
 }
 
+#ifndef __NetBSD__
 static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
 				int *depth)
 {
@@ -421,6 +440,7 @@ static int vmw_fb_compute_depth(struct fb_var_screeninfo *var,
 
 	return 0;
 }
+#endif
 
 static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 			     bool detach_bo,
@@ -468,16 +488,29 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
 	return 0;
 }
 
+#ifdef __NetBSD__
+static int vmw_fb_kms_framebuffer(struct vmw_fb_par *par,
+    struct drm_fb_helper_surface_size *sizes)
+#else
 static int vmw_fb_kms_framebuffer(struct fb_info *info)
+#endif
 {
 	struct drm_mode_fb_cmd mode_cmd;
+#ifndef __NetBSD__
 	struct vmw_fb_par *par = info->par;
 	struct fb_var_screeninfo *var = &info->var;
+#endif
 	struct drm_framebuffer *cur_fb;
 	struct vmw_framebuffer *vfb;
 	int ret = 0;
 	size_t new_bo_size;
 
+#ifdef __NetBSD__
+    mode_cmd.depth = 24;    /* XXX */
+	mode_cmd.width = sizes->fb_width;
+	mode_cmd.height = sizes->fb_height;
+	mode_cmd.bpp = sizes->surface_bpp;
+#else
 	ret = vmw_fb_compute_depth(var, &mode_cmd.depth);
 	if (ret)
 		return ret;
@@ -485,6 +518,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
 	mode_cmd.width = var->xres;
 	mode_cmd.height = var->yres;
 	mode_cmd.bpp = var->bits_per_pixel;
+#endif
 	mode_cmd.pitch = ((mode_cmd.bpp + 7) / 8) * mode_cmd.width;
 
 	cur_fb = par->set_fb;
@@ -513,6 +547,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
 			return ret;
 		}
 		par->bo_size = new_bo_size;
+
 	}
 
 	vfb = vmw_kms_new_framebuffer(par->vmw_priv, par->vmw_bo, NULL,
@@ -525,12 +560,20 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
 	return 0;
 }
 
+#ifdef __NetBSD__
+static int vmw_fb_set_par(struct vmw_fb_par *par, struct drm_fb_helper_surface_size *sizes)
+#else
 static int vmw_fb_set_par(struct fb_info *info)
+#endif
 {
+#ifndef __NetBSD__
 	struct vmw_fb_par *par = info->par;
+#endif
 	struct vmw_private *vmw_priv = par->vmw_priv;
 	struct drm_mode_set set;
+#ifndef __NetBSD__
 	struct fb_var_screeninfo *var = &info->var;
+#endif
 	struct drm_display_mode new_mode = { DRM_MODE("fb_mode",
 		DRM_MODE_TYPE_DRIVER,
 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -547,8 +590,13 @@ static int vmw_fb_set_par(struct fb_info *info)
 		return -ENOMEM;
 	}
 
+#ifdef __NetBSD__
+	mode->hdisplay = sizes->fb_width;
+	mode->vdisplay = sizes->fb_height;
+#else
 	mode->hdisplay = var->xres;
 	mode->vdisplay = var->yres;
+#endif
 	vmw_guess_mode_timing(mode);
 
 	if (old_mode && drm_mode_equal(old_mode, mode)) {
@@ -557,7 +605,11 @@ static int vmw_fb_set_par(struct fb_info *info)
 		old_mode = NULL;
 	} else if (!vmw_kms_validate_mode_vram(vmw_priv,
 					mode->hdisplay *
+#ifdef __NetBSD__
+					DIV_ROUND_UP(sizes->surface_bpp, 8),
+#else
 					DIV_ROUND_UP(var->bits_per_pixel, 8),
+#endif
 					mode->vdisplay)) {
 		drm_mode_destroy(vmw_priv->dev, mode);
 		return -EINVAL;
@@ -565,12 +617,18 @@ static int vmw_fb_set_par(struct fb_info *info)
 
 	mutex_lock(&par->bo_mutex);
 	drm_modeset_lock_all(vmw_priv->dev);
+#ifdef __NetBSD__
+	ret = vmw_fb_kms_framebuffer(par, sizes);
+#else
 	ret = vmw_fb_kms_framebuffer(info);
+#endif
 	if (ret)
 		goto out_unlock;
 
+#ifndef __NetBSD__
 	par->fb_x = var->xoffset;
 	par->fb_y = var->yoffset;
+#endif
 
 	set.crtc = par->crtc;
 	set.x = 0;
@@ -608,7 +666,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 		par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
 	}
 
-
+#ifndef __NetBSD__
 	vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
 			  par->set_fb->width, par->set_fb->height);
 
@@ -616,7 +674,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 	 * schedule a new work, so lets do it now */
 
 	schedule_delayed_work(&par->local_work, 0);
-
+#endif
 out_unlock:
 	if (old_mode)
 		drm_mode_destroy(vmw_priv->dev, old_mode);
@@ -628,7 +686,7 @@ out_unlock:
 	return ret;
 }
 
-
+#ifndef __NetBSD__
 static struct fb_ops vmw_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = vmw_fb_check_var,
@@ -640,17 +698,73 @@ static struct fb_ops vmw_fb_ops = {
 	.fb_pan_display = vmw_fb_pan_display,
 	.fb_blank = vmw_fb_blank,
 };
+#endif
+
+#ifdef __NetBSD__
+static int vmwgfxfb_create(struct drm_fb_helper *helper,
+        struct drm_fb_helper_surface_size *sizes);
+
+static const struct drm_fb_helper_funcs vmwgfx_fb_helper_funcs = {
+    .fb_probe = vmwgfxfb_create
+};
 
 int vmw_fb_init(struct vmw_private *vmw_priv)
 {
+    struct vmw_fb_par *par;
+    int ret;
+
+    par = kzalloc(sizeof(*par), GFP_KERNEL);
+    if (!par)
+        return -ENOMEM;
+    par->vmw_priv = vmw_priv;
+
+    vmw_priv->fb_par = par;
+
+    drm_fb_helper_prepare(vmw_priv->dev, &par->helper,
+            &vmwgfx_fb_helper_funcs);
+
+    ret = drm_fb_helper_init(vmw_priv->dev, &par->helper,
+            vmw_priv->num_displays, 4);
+    if (ret)
+        goto free;
+
+    //drm_helper_disable_unused_functions(vmw_priv->dev);
+
+    ret = drm_fb_helper_initial_config(&par->helper, 32);
+    if (ret)
+        goto fini;
+
+    return 0;
+fini:
+    drm_fb_helper_fini(&par->helper);
+free:
+    kfree(par);
+    vmw_priv->fb_par = NULL;
+    return ret;
+}
+
+static int vmwgfxfb_create(struct drm_fb_helper *helper,
+        struct drm_fb_helper_surface_size *sizes)
+#else
+int vmw_fb_init(struct vmw_private *vmw_priv)
+#endif
+{
+#ifndef __NetBSD__
 	struct device *device = &vmw_priv->dev->pdev->dev;
+#endif
 	struct vmw_fb_par *par;
+#ifdef __NetBSD__
+    struct vmw_private *vmw_priv;
+#endif
+#ifndef __NetBSD__
 	struct fb_info *info;
 	unsigned fb_width, fb_height;
 	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+#endif
 	struct drm_display_mode *init_mode;
 	int ret;
 
+#ifndef __NetBSD__
 	fb_bpp = 32;
 	fb_depth = 24;
 
@@ -661,7 +775,12 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	fb_pitch = fb_width * fb_bpp / 8;
 	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
+#endif
 
+#ifdef __NetBSD__
+    par = container_of(helper, struct vmw_fb_par, helper);
+    vmw_priv = par->vmw_priv;
+#else
 	info = framebuffer_alloc(sizeof(*par), device);
 	if (!info)
 		return -ENOMEM;
@@ -675,8 +794,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	INIT_DELAYED_WORK(&par->local_work, &vmw_fb_dirty_flush);
 	par->vmw_priv = vmw_priv;
 	par->vmalloc = NULL;
+#endif
+#ifdef __NetBSD__
+    par->max_width = sizes->surface_width;
+    par->max_height = sizes->surface_height;
+#else
 	par->max_width = fb_width;
 	par->max_height = fb_height;
+#endif
 
 	drm_modeset_lock_all(vmw_priv->dev);
 	ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
@@ -687,10 +812,13 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 		goto err_kms;
 	}
 
+#ifndef __NetBSD__
 	info->var.xres = init_mode->hdisplay;
 	info->var.yres = init_mode->vdisplay;
+#endif
 	drm_modeset_unlock_all(vmw_priv->dev);
 
+#ifndef __NetBSD__
 	/*
 	 * Create buffers and alloc memory
 	 */
@@ -750,7 +878,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	}
 	info->apertures->ranges[0].base = vmw_priv->vram_start;
 	info->apertures->ranges[0].size = vmw_priv->vram_size;
+#endif
 
+#ifndef __NetBSD__
 	/*
 	 * Dirty & Deferred IO
 	 */
@@ -758,7 +888,33 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	par->dirty.y1 = par->dirty.y2 = 0;
 	par->dirty.active = true;
 	spin_lock_init(&par->dirty.lock);
+#endif
+#ifdef __NetBSD__
+	linux_mutex_init(&par->bo_mutex);
+#else
 	mutex_init(&par->bo_mutex);
+#endif
+#ifdef __NetBSD__
+	vmw_fb_set_par(par, sizes);
+
+    {
+    struct vmwgfxfb_attach_args ifa;
+
+    ifa.drm_dev = par->vmw_priv->dev;
+    ifa.fb_bo = par->vmw_bo;
+    ifa.fb_ptr = par->bo_ptr;
+    ifa.drm_fb_helper = helper;
+    ifa.fb_sizes = *sizes;
+
+    helper->fbdev = config_found_ia(vmw_priv->dev->dev, "vmwgfxfbbus", &ifa,
+            NULL);
+    if (helper->fbdev == NULL) {
+        aprint_error_dev(vmw_priv->dev->dev, "failed to attach vmwgfxfb\n");
+        goto fail_attach;
+    }
+    }
+    par->helper.fb = par->set_fb;
+#else
 	info->fbdefio = &vmw_defio;
 	fb_deferred_io_init(info);
 
@@ -767,45 +923,90 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 		goto err_defio;
 
 	vmw_fb_set_par(info);
+#endif
 
 	return 0;
 
+#ifdef __NetBSD__
+fail_attach:
+    /* TODO cleanup results of set_par */
+    return -ENXIO;
+#else
 err_defio:
 	fb_deferred_io_cleanup(info);
 err_aper:
 err_free:
 	vfree(par->vmalloc);
+#endif
 err_kms:
+#ifndef __NetBSD__
 	framebuffer_release(info);
 	vmw_priv->fb_info = NULL;
+#endif
 
 	return ret;
 }
 
 int vmw_fb_close(struct vmw_private *vmw_priv)
 {
+#ifndef __NetBSD__
 	struct fb_info *info;
+#endif
 	struct vmw_fb_par *par;
+#ifdef __NetBSD__
+    int ret;
+#endif
 
+#ifdef __NetBSD__
+	if (!vmw_priv->fb_par)
+#else
 	if (!vmw_priv->fb_info)
+#endif
 		return 0;
 
+#ifdef __NetBSD__
+    par = vmw_priv->fb_par;
+#else
 	info = vmw_priv->fb_info;
 	par = info->par;
+#endif
+
+#ifdef __NetBSD__
+    if (par->helper.fbdev) {
+        ret = config_detach(par->helper.fbdev, DETACH_FORCE);
+        if (ret)
+            aprint_error_dev(vmw_priv->dev->dev, "failed to detach vmwgfxfb\n");
+    }
+
+    /* XXX copy from fb_off */
+	mutex_lock(&par->bo_mutex);
+	drm_modeset_lock_all(vmw_priv->dev);
+	(void) vmw_fb_kms_detach(par, true, false);
+	drm_modeset_unlock_all(vmw_priv->dev);
+	mutex_unlock(&par->bo_mutex);
+#endif
 
 	/* ??? order */
+#ifndef __NetBSD__
 	fb_deferred_io_cleanup(info);
 	cancel_delayed_work_sync(&par->local_work);
 	unregister_framebuffer(info);
+#endif
 
 	(void) vmw_fb_kms_detach(par, true, true);
 
+#ifdef __NetBSD__
+    kfree(par);
+    vmw_priv->fb_par = NULL;
+#else
 	vfree(par->vmalloc);
 	framebuffer_release(info);
+#endif
 
 	return 0;
 }
 
+#ifndef __NetBSD__
 int vmw_fb_off(struct vmw_private *vmw_priv)
 {
 	struct fb_info *info;
@@ -853,3 +1054,16 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
  
 	return 0;
 }
+#endif
+
+#ifdef __NetBSD__
+void
+vmw_fb_dirty(struct vmw_private *vmw_priv)
+{
+    struct vmw_fb_par *par = vmw_priv->fb_par;
+
+	mutex_lock(&par->bo_mutex);
+    par->set_fb->funcs->dirty(par->set_fb, NULL, 0, 0, NULL, 0);
+	mutex_unlock(&par->bo_mutex);
+}
+#endif
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.c
index 0a549c61..fa8a7e22 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.c
@@ -157,22 +157,44 @@ static bool vmw_fence_enable_signaling(struct fence *f)
 	return true;
 }
 
+#ifndef __NetBSD__
 struct vmwgfx_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
 };
+#endif
 
 static void
 vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
 {
+#ifdef __NetBSD__
+	struct vmw_fence_obj *vmw_fence =
+		container_of(fence, struct vmw_fence_obj, base);
+
+	struct vmw_fence_manager *fman = fman_from_fence(vmw_fence);
+	struct vmw_private *dev_priv = fman->dev_priv;
+
+    cv_broadcast(&dev_priv->fence_queue);
+#else
 	struct vmwgfx_wait_cb *wait =
 		container_of(cb, struct vmwgfx_wait_cb, base);
 
 	wake_up_process(wait->task);
+#endif
 }
 
 static void __vmw_fences_update(struct vmw_fence_manager *fman);
 
+#ifdef __NetBSD__
+static bool
+vmwgfx_test_signaled(struct vmw_fence_obj *fence)
+{
+	struct vmw_fence_manager *fman = fman_from_fence(fence);
+	__vmw_fences_update(fman);
+	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
+}
+#endif
+
 static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
 {
 	struct vmw_fence_obj *fence =
@@ -180,9 +202,15 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
 
 	struct vmw_fence_manager *fman = fman_from_fence(fence);
 	struct vmw_private *dev_priv = fman->dev_priv;
+#ifdef __NetBSD__
+    struct fence_cb fcb;
+#else
 	struct vmwgfx_wait_cb cb;
+#endif
 	long ret = timeout;
+#ifndef __NetBSD__
 	unsigned long irq_flags;
+#endif
 
 	if (likely(vmw_fence_obj_signaled(fence)))
 		return timeout;
@@ -190,6 +218,25 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	vmw_seqno_waiter_add(dev_priv);
 
+#ifdef __NetBSD__
+    ret = fence_add_callback(f, &fcb, vmwgfx_wait_cb);
+    if (ret)
+        return timeout;
+
+    spin_lock(&fman->lock);
+    if (intr) {
+        DRM_SPIN_TIMED_WAIT_UNTIL(ret, &dev_priv->fence_queue,
+                &fman->lock, timeout,
+                vmwgfx_test_signaled(fence));
+    } else {
+        DRM_SPIN_TIMED_WAIT_NOINTR_UNTIL(ret, &dev_priv->fence_queue,
+                &fman->lock, timeout,
+                vmwgfx_test_signaled(fence));
+    }
+    spin_unlock(&fman->lock);
+
+    fence_remove_callback(f, &fcb);
+#else
 	spin_lock_irqsave(f->lock, irq_flags);
 
 	if (intr && signal_pending(current)) {
@@ -199,6 +246,7 @@ static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
 
 	cb.base.func = vmwgfx_wait_cb;
 	cb.task = current;
+    cb.dev = dev_priv;
 	list_add(&cb.base.node, &f->cb_list);
 
 	while (ret > 0) {
@@ -228,6 +276,8 @@ out:
 
 	vmw_seqno_waiter_remove(dev_priv);
 
+#endif
+
 	return ret;
 }
 
@@ -303,7 +353,11 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
 	fman->event_fence_action_size =
 		ttm_round_pot(sizeof(struct vmw_event_fence_action));
+#ifdef __NetBSD__
+    linux_mutex_init(&fman->goal_irq_mutex);
+#else
 	mutex_init(&fman->goal_irq_mutex);
+#endif
 	fman->ctx = fence_context_alloc(1);
 
 	return fman;
@@ -312,7 +366,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
 void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
 {
 	unsigned long irq_flags;
-	bool lists_empty;
+	bool lists_empty __diagused;
 
 	(void) cancel_work_sync(&fman->work);
 
@@ -493,13 +547,21 @@ rerun:
 		(void) schedule_work(&fman->work);
 }
 
-void vmw_fences_update(struct vmw_fence_manager *fman)
+void vmw_fences_update(struct vmw_fence_manager *fman, bool wakeup)
 {
 	unsigned long irq_flags;
 
 	spin_lock_irqsave(&fman->lock, irq_flags);
 	__vmw_fences_update(fman);
+#ifdef __NetBSD__
+    if (wakeup)
+        cv_broadcast(&fman->dev_priv->fence_queue);
+#endif
 	spin_unlock_irqrestore(&fman->lock, irq_flags);
+#ifndef __NetBSD__
+    if (wakeup)
+        wake_up_all(&fman->dev_priv->fence_queue);
+#endif
 }
 
 bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -509,7 +571,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
 	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 		return 1;
 
-	vmw_fences_update(fman);
+	vmw_fences_update(fman, false);
 
 	return fence_is_signaled(&fence->base);
 }
@@ -986,7 +1048,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 			fman->goal_irq_on = true;
 			vmw_goal_waiter_add(fman->dev_priv);
 		}
-		vmw_fences_update(fman);
+		vmw_fences_update(fman, false);
 	}
 	mutex_unlock(&fman->goal_irq_mutex);
 
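A note on the wait conversion above (the fifo and fallback waits later in
the patch follow the same shape): the completion path broadcasts a condvar
and the waiter sleeps on it under a lock, re-checking a predicate until it
holds or a deadline passes. Below is a rough standalone userland model of
that shape, using POSIX threads instead of the kernel cv(9) /
DRM_SPIN_TIMED_WAIT_UNTIL primitives; all demo_* names are made up and only
the structure mirrors the patch.

/*
 * Userland model of the wait shape used in the __NetBSD__ branches: the
 * signalling side sets a flag and broadcasts, the waiting side re-checks
 * the predicate under the lock until it holds or the deadline expires.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t demo_cv = PTHREAD_COND_INITIALIZER;
static bool demo_signaled;		/* "the fence has signaled" */

/* stands in for the irq path: __vmw_fences_update() + cv_broadcast() */
static void *
demo_signaller(void *arg)
{

	(void)arg;
	sleep(1);
	pthread_mutex_lock(&demo_lock);
	demo_signaled = true;
	pthread_cond_broadcast(&demo_cv);
	pthread_mutex_unlock(&demo_lock);
	return NULL;
}

/* stands in for DRM_SPIN_TIMED_WAIT_UNTIL(): 1 = predicate holds, 0 = timeout */
static int
demo_timed_wait(int timeout_sec)
{
	struct timespec deadline;
	int ret = 1;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&demo_lock);
	while (!demo_signaled) {
		if (pthread_cond_timedwait(&demo_cv, &demo_lock,
		    &deadline) != 0) {
			ret = demo_signaled ? 1 : 0;	/* ETIMEDOUT */
			break;
		}
	}
	pthread_mutex_unlock(&demo_lock);
	return ret;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, demo_signaller, NULL);
	printf("wait result: %d\n", demo_timed_wait(5));
	pthread_join(t, NULL);
	return 0;
}
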
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.h b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.h
index 0620f8ce..03b71fe4 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.h
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.h
@@ -84,7 +84,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence)
 	return fence;
 }
 
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+extern void vmw_fences_update(struct vmw_fence_manager *fman, bool wakeup);
 
 extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
 
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fifo.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fifo.c
index 403595a9..0b720695 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fifo.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fifo.c
@@ -119,7 +119,11 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	fifo->reserved_size = 0;
 	fifo->using_bounce_buffer = false;
 
+#ifdef __NetBSD__
+	linux_mutex_init(&fifo->fifo_mutex);
+#else
 	mutex_init(&fifo->fifo_mutex);
+#endif
 	init_rwsem(&fifo->rwsem);
 
 	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
@@ -173,7 +177,11 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 	u32 *fifo_mem = dev_priv->mmio_virt;
 
 	preempt_disable();
+#ifdef __NetBSD__
+	if (atomic_cmpxchg((atomic_t *)(fifo_mem + SVGA_FIFO_BUSY), 0, 1) == 0)
+#else
 	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
+#endif
 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
 	preempt_enable();
 }
@@ -224,11 +232,42 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
 			       unsigned long timeout)
 {
 	int ret = 0;
+#ifndef __NetBSD__
 	unsigned long end_jiffies = jiffies + timeout;
 	DEFINE_WAIT(__wait);
+#endif
 
 	DRM_INFO("Fifo wait noirq.\n");
 
+#ifdef __NetBSD__
+    if (timeout == 0) {
+        timeout = 1;
+    }
+    mutex_lock(&dev_priv->fifo.fifo_mutex);
+    while (vmw_fifo_is_full(dev_priv, bytes)) {    /* wait while the FIFO lacks room for bytes */
+        if (timeout == 0) {
+            ret = -EBUSY;
+		    DRM_ERROR("SVGA device lockup.\n");
+            break;
+        }
+        if (interruptible) {
+            ret = cv_timedwait_sig(&dev_priv->fifo_queue,
+                    &dev_priv->fifo.fifo_mutex.mtx_lock, 1);
+            if (ret == EWOULDBLOCK) {
+                ret = 0;
+            } else if (ret == ERESTART || ret == EINTR) {
+                ret = -ERESTARTSYS;
+                break;
+            }
+        } else {
+            cv_timedwait(&dev_priv->fifo_queue,
+                    &dev_priv->fifo.fifo_mutex.mtx_lock, 1);
+        }
+        timeout--;  /* XXX: not accurate! */
+    }
+    cv_broadcast(&dev_priv->fifo_queue);
+    mutex_unlock(&dev_priv->fifo.fifo_mutex);
+#else
 	for (;;) {
 		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
 				(interruptible) ?
@@ -248,6 +287,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
 	}
 	finish_wait(&dev_priv->fifo_queue, &__wait);
 	wake_up_all(&dev_priv->fifo_queue);
+#endif
 	DRM_INFO("Fifo noirq exit.\n");
 	return ret;
 }
@@ -269,6 +309,32 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
 			       &dev_priv->fifo_queue_waiters);
 
+#ifdef __NetBSD__
+    mutex_lock(&dev_priv->fifo.fifo_mutex);
+    while (vmw_fifo_is_full(dev_priv, bytes)) {    /* wait while the FIFO lacks room for bytes */
+        if (interruptible) {
+            ret = cv_timedwait_sig(&dev_priv->fifo_queue,
+                    &dev_priv->fifo.fifo_mutex.mtx_lock, timeout);
+            if (ret) {
+                if (ret == EWOULDBLOCK) {
+                    ret = -EBUSY;
+                } else {
+                    ret = -ERESTARTSYS;
+                }
+                break;
+            }
+        } else {
+            ret = cv_timedwait(&dev_priv->fifo_queue,
+                    &dev_priv->fifo.fifo_mutex.mtx_lock, timeout);
+            if (ret) {
+                ret = -EBUSY;
+                break;
+            }
+        }
+        /* TODO: reduce timeout if we loop */
+    }
+    mutex_unlock(&dev_priv->fifo.fifo_mutex);
+#else
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
 		    (dev_priv->fifo_queue,
@@ -282,7 +348,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 		ret = -EBUSY;
 	else if (likely(ret > 0))
 		ret = 0;
-
+#endif
 	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
 				  &dev_priv->fifo_queue_waiters);
 
@@ -403,7 +469,9 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
 	if (IS_ERR_OR_NULL(ret)) {
 		DRM_ERROR("Fifo reserve failure of %u bytes.\n",
 			  (unsigned) bytes);
+#ifndef __NetBSD__
 		dump_stack();
+#endif
 		return NULL;
 	}
 
@@ -533,7 +601,9 @@ void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
  */
 int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
 {
+#ifndef __NetBSD__
 	might_sleep();
+#endif
 
 	if (dev_priv->cman)
 		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ioctl.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ioctl.c
index cd94dcbb..159fc60e 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ioctl.c
@@ -407,7 +407,15 @@ out_clips:
 	return ret;
 }
 
+#ifdef __NetBSD__
+void
+vmw_fops_ping_host(struct drm_device *dev)
+{
+    struct vmw_private *dev_priv = (struct vmw_private *)dev->dev_private;
 
+    vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+}
+#else
 /**
  * vmw_fops_poll - wrapper around the drm_poll function
  *
@@ -449,3 +457,4 @@ ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	return drm_read(filp, buffer, count, offset);
 }
+#endif
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_irq.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_irq.c
index e501a446..00ae3615 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_irq.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_irq.c
@@ -35,29 +35,65 @@ __KERNEL_RCSID(0, "$NetBSD: vmwgfx_irq.c,v 1.2 2018/08/27 04:58:37 riastradh Exp
 
 #define VMW_FENCE_WRAP (1 << 24)
 
+#ifdef __NetBSD__
+
+/*
+ * XXX: not correct?
+ * the affected locks are taken from softint but they may be blocked for
+ * a "long" period by thread context, e.g. vmw_cmdbuf_man_idle ?
+ * However, _bh apparently blocks the whole softirq machinery, so is it any better?
+ */
+
+#define spin_lock_bh spin_lock
+#define spin_unlock_bh spin_unlock
+
+#endif
+
+#ifdef __NetBSD__
+irqreturn_t vmw_irq_handler(void *arg)
+#else
 irqreturn_t vmw_irq_handler(int irq, void *arg)
+#endif
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	uint32_t status, masked_status;
 
+#ifdef __NetBSD__
+    status = bus_space_read_4(dev_priv->dev->pdev->pd_pa.pa_iot,
+            dev_priv->io_bsh,
+            VMWGFX_IRQSTATUS_PORT);
+    masked_status = status & dev_priv->irq_mask;
+#else
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	masked_status = status & READ_ONCE(dev_priv->irq_mask);
+#endif
 
 	if (likely(status))
+#ifdef __NetBSD__
+		bus_space_write_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+                VMWGFX_IRQSTATUS_PORT, status);
+#else
 		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+#endif
 
 	if (!status)
 		return IRQ_NONE;
 
 	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
 			     SVGA_IRQFLAG_FENCE_GOAL)) {
-		vmw_fences_update(dev_priv->fman);
-		wake_up_all(&dev_priv->fence_queue);
+		vmw_fences_update(dev_priv->fman, true);
 	}
 
-	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
+	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS) {
+#ifdef __NetBSD__
+        mutex_lock(&dev_priv->fifo.fifo_mutex);
+        cv_broadcast(&dev_priv->fifo_queue);
+        mutex_unlock(&dev_priv->fifo.fifo_mutex);
+#else
 		wake_up_all(&dev_priv->fifo_queue);
+#endif
+    }
 
 	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
 			     SVGA_IRQFLAG_ERROR))
@@ -81,7 +117,7 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
 	if (dev_priv->last_read_seqno != seqno) {
 		dev_priv->last_read_seqno = seqno;
 		vmw_marker_pull(&fifo_state->marker_queue, seqno);
-		vmw_fences_update(dev_priv->fman);
+		vmw_fences_update(dev_priv->fman, false);
 	}
 }
 
@@ -123,13 +159,26 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 
+#ifndef __NetBSD__
 	uint32_t count = 0;
+#endif
 	uint32_t signal_seq;
 	int ret;
+#ifdef __NetBSD__
+    struct bintime bintimeout = ms2bintime(hztoms(timeout));
+    struct bintime timeout_end;
+#else
 	unsigned long end_jiffies = jiffies + timeout;
+#endif
 	bool (*wait_condition)(struct vmw_private *, uint32_t);
+#ifndef __NetBSD__
 	DEFINE_WAIT(__wait);
+#endif
 
+#ifdef __NetBSD__
+    getbintime(&timeout_end);
+    bintime_add(&timeout_end, &bintimeout);
+#endif
 	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
 		&vmw_seqno_passed;
 
@@ -150,6 +199,35 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	signal_seq = atomic_read(&dev_priv->marker_seq);
 	ret = 0;
 
+#ifdef __NetBSD__
+    while (! wait_condition(dev_priv, seqno)) {
+        struct bintime current_time;
+        getbintime(&current_time);
+        if ((current_time.sec > timeout_end.sec)
+                || (current_time.sec == timeout_end.sec
+                && current_time.frac >= timeout_end.frac)) {
+            // timeout
+            break;
+        }
+        /*
+         * XXX: this is just grabbing some lock for the benefit of cv
+         */
+        spin_lock(&dev_priv->hw_lock);
+        if (interruptible) {
+            int cvret = cv_timedwait_sig(&dev_priv->fence_queue,
+                    &dev_priv->hw_lock.sl_lock, 1);
+            if (cvret == ERESTART) {
+                spin_unlock(&dev_priv->hw_lock);
+                ret = -ERESTARTSYS;
+                break;
+            }
+        } else {
+            cv_timedwait(&dev_priv->fence_queue,
+                    &dev_priv->hw_lock.sl_lock, 1);
+        }
+        spin_unlock(&dev_priv->hw_lock);
+    }
+#else
 	for (;;) {
 		prepare_to_wait(&dev_priv->fence_queue, &__wait,
 				(interruptible) ?
@@ -180,12 +258,17 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 		}
 	}
 	finish_wait(&dev_priv->fence_queue, &__wait);
+#endif
 	if (ret == 0 && fifo_idle) {
 		u32 *fifo_mem = dev_priv->mmio_virt;
 
 		vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
 	}
+#ifdef __NetBSD__
+    cv_broadcast(&dev_priv->fence_queue);
+#else
 	wake_up_all(&dev_priv->fence_queue);
+#endif
 out_err:
 	if (fifo_idle)
 		up_read(&fifo_state->rwsem);
@@ -198,7 +281,12 @@ void vmw_generic_waiter_add(struct vmw_private *dev_priv,
 {
 	spin_lock_bh(&dev_priv->waiter_lock);
 	if ((*waiter_count)++ == 0) {
+#ifdef __NetBSD__
+        bus_space_write_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+                VMWGFX_IRQSTATUS_PORT, flag);
+#else
 		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+#endif
 		dev_priv->irq_mask |= flag;
 		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
 	}
@@ -246,6 +334,10 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
 {
 	long ret;
 	struct vmw_fifo_state *fifo = &dev_priv->fifo;
+#ifdef __NetBSD__
+    struct bintime bintimeout = hz2bintime(timeout);
+    struct bintime timeout_epsilon = ms2bintime(10);
+#endif
 
 	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return 0;
@@ -265,6 +357,31 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
 
 	vmw_seqno_waiter_add(dev_priv);
 
+#ifdef __NetBSD__
+    do {
+        /* XXX need to grab some lock for cv_wait */
+        spin_lock(&dev_priv->hw_lock);
+        if (interruptible) {
+            ret = cv_timedwaitbt_sig(&dev_priv->fence_queue,
+                    &dev_priv->hw_lock.sl_lock,
+                    &bintimeout, &timeout_epsilon);
+        } else {
+            ret = cv_timedwaitbt(&dev_priv->fence_queue,
+                    &dev_priv->hw_lock.sl_lock,
+                    &bintimeout, &timeout_epsilon);
+        }
+        spin_unlock(&dev_priv->hw_lock);
+        if (ret == EWOULDBLOCK) {
+            ret = 0;
+            break;
+        } else if (ret == ERESTART || ret == EINTR) {
+            ret = -ERESTARTSYS;
+            break;
+        } else {
+            ret = 1;
+        }
+    } while (!vmw_seqno_passed(dev_priv, seqno));
+#else
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
 		    (dev_priv->fence_queue,
@@ -275,6 +392,7 @@ int vmw_wait_seqno(struct vmw_private *dev_priv,
 		    (dev_priv->fence_queue,
 		     vmw_seqno_passed(dev_priv, seqno),
 		     timeout);
+#endif
 
 	vmw_seqno_waiter_remove(dev_priv);
 
@@ -294,8 +412,16 @@ void vmw_irq_preinstall(struct drm_device *dev)
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
 		return;
 
+#ifdef __NetBSD__
+    status = bus_space_read_4(dev_priv->dev->pdev->pd_pa.pa_iot,
+            dev_priv->io_bsh,
+            VMWGFX_IRQSTATUS_PORT);
+    bus_space_write_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+            VMWGFX_IRQSTATUS_PORT, status);
+#else
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+#endif
 }
 
 int vmw_irq_postinstall(struct drm_device *dev)
@@ -313,6 +439,13 @@ void vmw_irq_uninstall(struct drm_device *dev)
 
 	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
 
+#ifdef __NetBSD__
+    status = bus_space_read_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+            VMWGFX_IRQSTATUS_PORT);
+    bus_space_write_4(dev_priv->dev->pdev->pd_pa.pa_iot, dev_priv->io_bsh,
+            VMWGFX_IRQSTATUS_PORT, status);
+#else
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+#endif
 }
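
The fallback wait above also switches from jiffies arithmetic to an absolute
struct bintime deadline: getbintime() at entry plus the converted timeout,
compared against the current time on every loop pass. The same check in
userland terms, with timespec standing in for bintime and all demo_* names
made up:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool
demo_deadline_passed(const struct timespec *now, const struct timespec *end)
{

	return now->tv_sec > end->tv_sec ||
	    (now->tv_sec == end->tv_sec && now->tv_nsec >= end->tv_nsec);
}

int
main(void)
{
	struct timespec now, end;
	const struct timespec tick = { 0, 10 * 1000 * 1000 };

	clock_gettime(CLOCK_MONOTONIC, &end);
	end.tv_sec += 2;			/* two second timeout */

	for (;;) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (demo_deadline_passed(&now, &end)) {
			printf("timed out\n");	/* the real code gives up here */
			break;
		}
		/* the real code cv_timedwait()s for one tick instead */
		nanosleep(&tick, NULL);
	}
	return 0;
}
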
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_kms.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_kms.c
index 7181b178..33d9f1fa 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_kms.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_kms.c
@@ -340,7 +340,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 		/* Image is unsigned pointer. */
 		for (i = 0; i < box->h; i++)
 			memcpy(srf->snooper.image + i * 64,
-			       virtual + i * cmd->dma.guest.pitch,
+			       ((char *)virtual) + i * cmd->dma.guest.pitch,
 			       box->w * 4);
 	}
 
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_marker.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_marker.c
index aa1ba70c..c07a9d18 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_marker.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_marker.c
@@ -125,6 +125,9 @@ static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
 static bool vmw_lag_lt(struct vmw_marker_queue *queue,
 		       uint32_t us)
 {
+#ifndef NSEC_PER_USEC
+#define NSEC_PER_USEC 1000L
+#endif
 	u64 cond = (u64) us * NSEC_PER_USEC;
 
 	return vmw_fifo_lag(queue) <= cond;
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_overlay.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_overlay.c
index 43dcaa55..65eb5814 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_overlay.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_overlay.c
@@ -587,7 +587,11 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
 	if (!overlay)
 		return -ENOMEM;
 
+#ifdef  __NetBSD__
+	linux_mutex_init(&overlay->mutex);
+#else
 	mutex_init(&overlay->mutex);
+#endif
 	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
 		overlay->stream[i].buf = NULL;
 		overlay->stream[i].paused = false;
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_prime.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_prime.c
index 35f2f6eb..3474a302 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_prime.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_prime.c
@@ -100,8 +100,14 @@ static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
 
 }
 
+#ifdef __NetBSD__
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf, off_t *offp,
+        size_t size, int port, int *flagsp, int *advicep,
+        struct uvm_object **uobjp, int *maxprotp)
+#else
 static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
 				 struct vm_area_struct *vma)
+#endif
 {
 	WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
 	return -ENOSYS;
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_resource.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_resource.c
index ca7e2ad7..4df32c45 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_resource.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_resource.c
@@ -1086,7 +1086,7 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
 static int vmw_resource_buf_alloc(struct vmw_resource *res,
 				  bool interruptible)
 {
-	unsigned long size =
+	unsigned long size __diagused =
 		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
 	struct vmw_dma_buffer *backup;
 	int ret;
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_shader.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_shader.c
index 6e4ff223..ba8143b7 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_shader.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_shader.c
@@ -462,7 +462,7 @@ static int vmw_dx_shader_bind(struct vmw_resource *res,
 			      struct ttm_validate_buffer *val_buf)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
-	struct ttm_buffer_object *bo = val_buf->bo;
+	struct ttm_buffer_object *bo __diagused = val_buf->bo;
 
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 	mutex_lock(&dev_priv->binding_mutex);
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_so.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_so.c
index cbd7722b..9875b47a 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_so.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_so.c
@@ -297,7 +297,12 @@ static void vmw_view_res_free(struct vmw_resource *res)
 
 	vmw_resource_unreference(&view->cotable);
 	vmw_resource_unreference(&view->srf);
+#ifdef __NetBSD__
+    /* XXX */
+	kfree(view);
+#else
 	kfree_rcu(view, rcu);
+#endif
 	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_surface.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_surface.c
index d0254374..9c55f74a 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_surface.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_surface.c
@@ -1088,7 +1088,11 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		cmd2->header.size = cmd_len;
 		cmd2->body.sid = srf->res.id;
 		cmd2->body.surfaceFlags = srf->flags;
+#ifdef __NetBSD__
+		cmd2->body.format = htole32(srf->format);
+#else
 		cmd2->body.format = cpu_to_le32(srf->format);
+#endif
 		cmd2->body.numMipLevels = srf->mip_levels[0];
 		cmd2->body.multisampleCount = srf->multisample_count;
 		cmd2->body.autogenFilter = srf->autogen_filter;
@@ -1101,7 +1105,11 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 		cmd->header.size = cmd_len;
 		cmd->body.sid = srf->res.id;
 		cmd->body.surfaceFlags = srf->flags;
+#ifdef __NetBSD__
+		cmd->body.format = htole32(srf->format);
+#else
 		cmd->body.format = cpu_to_le32(srf->format);
+#endif
 		cmd->body.numMipLevels = srf->mip_levels[0];
 		cmd->body.multisampleCount = srf->multisample_count;
 		cmd->body.autogenFilter = srf->autogen_filter;
@@ -1169,7 +1177,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 				 struct ttm_validate_buffer *val_buf)
 {
 	struct vmw_private *dev_priv = res->dev_priv;
-	struct ttm_buffer_object *bo = val_buf->bo;
+	struct ttm_buffer_object *bo __diagused = val_buf->bo;
 	struct vmw_fence_obj *fence;
 
 	struct {
diff --git a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ttm_glue.c b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ttm_glue.c
index 4b99af0c..2a2fa2ed 100644
--- a/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/src/sys/external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -33,8 +33,26 @@ __KERNEL_RCSID(0, "$NetBSD: vmwgfx_ttm_glue.c,v 1.2 2018/08/27 04:58:37 riastrad
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
 
+#ifdef __NetBSD__
+int vmw_mmap(struct drm_device *dev, off_t off, size_t len, int prot,
+        struct uvm_object **uobj, voff_t *offp, struct file *filp)
+#else
 int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
+#endif
 {
+#ifdef __NetBSD__
+	struct vmw_private *dev_priv;
+
+   	if (unlikely(off < VMWGFX_FILE_PAGE_OFFSET)) {
+		DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
+		return -EINVAL;
+	}
+
+    dev_priv = vmw_priv(dev);
+
+    return ttm_bo_mmap_object(&dev_priv->bdev, off, len, prot, uobj, offp,
+            filp);
+#else
 	struct drm_file *file_priv;
 	struct vmw_private *dev_priv;
 
@@ -46,6 +64,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	file_priv = filp->private_data;
 	dev_priv = vmw_priv(file_priv->minor->dev);
 	return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
+#endif
 }
 
 static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
diff --git a/src/sys/external/bsd/drm2/dist/include/drm/drmP.h b/src/sys/external/bsd/drm2/dist/include/drm/drmP.h
index ca2a2845..f7dd1992 100644
--- a/src/sys/external/bsd/drm2/dist/include/drm/drmP.h
+++ b/src/sys/external/bsd/drm2/dist/include/drm/drmP.h
@@ -712,6 +712,9 @@ struct drm_driver {
 
 #ifdef __NetBSD__
 	int (*ioctl_override)(struct file *, unsigned long, void *);
+    /* XXX for vmwgfx */
+    void (*fops_pre_read)(struct drm_device *);
+    void (*fops_pre_poll)(struct drm_device *);
 #endif
 
 	/* List of devices hanging off this driver with stealth attach. */
diff --git a/src/sys/external/bsd/drm2/dist/include/drm/ttm/ttm_object.h b/src/sys/external/bsd/drm2/dist/include/drm/ttm/ttm_object.h
index c1372cb4..89db7128 100644
--- a/src/sys/external/bsd/drm2/dist/include/drm/ttm/ttm_object.h
+++ b/src/sys/external/bsd/drm2/dist/include/drm/ttm/ttm_object.h
@@ -323,8 +323,13 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
 
 extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
+#ifdef __NetBSD__
+#define ttm_base_object_kfree(__object, __base)\
+	kfree(__object)
+#else
 #define ttm_base_object_kfree(__object, __base)\
 	kfree_rcu(__object, __base.rhead)
+#endif
 
 extern int ttm_prime_object_init(struct ttm_object_file *tfile,
 				 size_t size,
@@ -350,6 +355,12 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 				  uint32_t handle, uint32_t flags,
 				  int *prime_fd);
 
+#ifdef __NetBSD__
+#define ttm_prime_object_kfree(__obj, __prime)		\
+	kfree(__obj)
+#else
 #define ttm_prime_object_kfree(__obj, __prime)		\
 	kfree_rcu(__obj, __prime.base.rhead)
 #endif
+
+#endif
diff --git a/src/sys/external/bsd/drm2/drm/drm_cdevsw.c b/src/sys/external/bsd/drm2/drm/drm_cdevsw.c
index 20073f5a..0745fc53 100644
--- a/src/sys/external/bsd/drm2/drm/drm_cdevsw.c
+++ b/src/sys/external/bsd/drm2/drm/drm_cdevsw.c
@@ -276,9 +276,13 @@ drm_read(struct file *fp, off_t *off, struct uio *uio, kauth_cred_t cred,
 {
 	struct drm_file *const file = fp->f_data;
 	struct drm_pending_event *event;
+	struct drm_device *const dev = file->minor->dev;
 	bool first;
 	int error = 0;
 
+	if (dev->driver->fops_pre_read)
+		dev->driver->fops_pre_read(dev);
+
 	for (first = true; ; first = false) {
 		int f = 0;
 
@@ -369,6 +373,9 @@ drm_poll(struct file *fp __unused, int events __unused)
 	int revents = 0;
 	unsigned long irqflags;
 
+	if (dev->driver->fops_pre_poll)
+		dev->driver->fops_pre_poll(dev);
+
 	if (!ISSET(events, (POLLIN | POLLRDNORM)))
 		return 0;
 
diff --git a/src/sys/external/bsd/drm2/drm/drmfb.c b/src/sys/external/bsd/drm2/drm/drmfb.c
index ab8b80bc..08489fa7 100644
--- a/src/sys/external/bsd/drm2/drm/drmfb.c
+++ b/src/sys/external/bsd/drm2/drm/drmfb.c
@@ -76,6 +76,8 @@ static int	drmfb_genfb_enable_polling(void *);
 static int	drmfb_genfb_disable_polling(void *);
 static bool	drmfb_genfb_setmode(struct genfb_softc *, int);
 
+static void drmfb_genfb_dirty(void *);
+
 static const struct genfb_mode_callback drmfb_genfb_mode_callback = {
 	.gmc_setmode = drmfb_genfb_setmode,
 };
@@ -155,6 +157,8 @@ drmfb_attach(struct drmfb_softc *sc, const struct drmfb_attach_args *da)
 	genfb_ops.genfb_mmap = drmfb_genfb_mmap;
 	genfb_ops.genfb_enable_polling = drmfb_genfb_enable_polling;
 	genfb_ops.genfb_disable_polling = drmfb_genfb_disable_polling;
+	if (sc->sc_da.da_params->dp_dirty)
+		genfb_ops.genfb_dirty = drmfb_genfb_dirty;
 
 	error = genfb_attach(&sc->sc_genfb, &genfb_ops);
 	if (error) {
@@ -309,3 +313,13 @@ drmfb_shutdown(struct drmfb_softc *sc, int flags __unused)
 	genfb_enable_polling(sc->sc_da.da_dev);
 	return true;
 }
+
+static void
+drmfb_genfb_dirty(void *cookie)
+{
+	struct genfb_softc *const genfb = cookie;
+	struct drmfb_softc *const sc = container_of(genfb, struct drmfb_softc,
+	    sc_genfb);
+
+    sc->sc_da.da_params->dp_dirty(sc);
+}
diff --git a/src/sys/external/bsd/drm2/include/drm/drmfb.h b/src/sys/external/bsd/drm2/include/drm/drmfb.h
index b9929544..91efe974 100644
--- a/src/sys/external/bsd/drm2/include/drm/drmfb.h
+++ b/src/sys/external/bsd/drm2/include/drm/drmfb.h
@@ -69,6 +69,8 @@ struct drmfb_params {
 	/* XXX Kludge!  */
 	bool		(*dp_is_vga_console)(struct drm_device *);
 	void		(*dp_disable_vga)(struct drm_device *);
+
+	void		(*dp_dirty)(struct drmfb_softc *);
 };
 
 struct drmfb_attach_args {
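
To make the dp_dirty plumbing easier to follow: rasops/genfb calls the new
genfb_dirty hook after drawing, drmfb forwards it through the new dp_dirty
parameter above, and the vmwgfx handler ends up in vmw_fb_dirty(), which
marks the whole DRM framebuffer dirty so the device fetches the changes.
A standalone model of that three-level indirection; everything named demo_*
is made up, only the call chain mirrors the patch.

#include <stdio.h>

struct demo_driver {				/* vmw_private, roughly */
	void (*flush)(struct demo_driver *);	/* vmw_fb_dirty(), roughly */
};

struct demo_drmfb {				/* drmfb_softc, roughly */
	struct demo_driver *drv;
	void (*dp_dirty)(struct demo_drmfb *);	/* the new drmfb_params hook */
};

static void
demo_vmw_fb_dirty(struct demo_driver *drv)
{

	(void)drv;
	/* the real one calls set_fb->funcs->dirty() here */
	printf("flush the whole framebuffer to the host\n");
}

static void
demo_dp_dirty(struct demo_drmfb *fb)
{

	fb->drv->flush(fb->drv);
}

/* stands in for drmfb_genfb_dirty(): what genfb/rasops invokes after drawing */
static void
demo_genfb_dirty(void *cookie)
{
	struct demo_drmfb *fb = cookie;

	if (fb->dp_dirty != NULL)
		fb->dp_dirty(fb);
}

int
main(void)
{
	struct demo_driver drv = { .flush = demo_vmw_fb_dirty };
	struct demo_drmfb fb = { .drv = &drv, .dp_dirty = demo_dp_dirty };

	demo_genfb_dirty(&fb);	/* what rasops would do after eraserows etc. */
	return 0;
}
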
diff --git a/src/sys/external/bsd/drm2/include/drm/ttm/ttm_lock.h b/src/sys/external/bsd/drm2/include/drm/ttm/ttm_lock.h
new file mode 100644
index 00000000..94017b8d
--- /dev/null
+++ b/src/sys/external/bsd/drm2/include/drm/ttm/ttm_lock.h
@@ -0,0 +1,42 @@
+#ifndef _TTM_LOCK_H_
+#define _TTM_LOCK_H_
+
+#include <sys/condvar.h>
+#include <sys/mutex.h>
+
+#include <ttm/ttm_object.h>
+
+struct ttm_lock {
+    struct ttm_base_object base;
+    kmutex_t lock;
+    kcondvar_t cv;
+    int32_t rw;
+    uint32_t flags;
+    bool kill_takers;
+    int signal;
+    struct ttm_object_file *vt_holder;
+};
+
+extern void ttm_lock_init(struct ttm_lock *lock);
+extern void ttm_read_unlock(struct ttm_lock *lock);
+extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
+extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
+extern void ttm_write_unlock(struct ttm_lock *lock);
+extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+extern void ttm_lock_downgrade(struct ttm_lock *lock);
+extern void ttm_suspend_lock(struct ttm_lock *lock);
+extern void ttm_suspend_unlock(struct ttm_lock *lock);
+extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+		       struct ttm_object_file *tfile);
+
+extern int ttm_vt_unlock(struct ttm_lock *lock);
+
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+				     int signal)
+{
+	lock->kill_takers = val;
+	if (val)
+		lock->signal = signal;
+}
+
+#endif
diff --git a/src/sys/external/bsd/drm2/include/linux/idr.h b/src/sys/external/bsd/drm2/include/linux/idr.h
index 4003017e..7cd85e67 100644
--- a/src/sys/external/bsd/drm2/include/linux/idr.h
+++ b/src/sys/external/bsd/drm2/include/linux/idr.h
@@ -123,4 +123,24 @@ ida_simple_remove(struct ida *ida, unsigned int id)
 	ida_remove(ida, id);
 }
 
+static inline int
+ida_pre_get(struct ida *ida, gfp_t gfp)
+{
+
+    return 1;
+}
+
+static inline int
+ida_get_new(struct ida *ida, int *id)
+{
+    /* TODO */
+    int r = ida_simple_get(ida, 0, INT_MAX, GFP_KERNEL);
+    if (r < 0) {
+        *id = -1;
+        return r;
+    }
+    *id = r;
+    return 0;
+}
+
 #endif  /* _LINUX_IDR_H_ */
diff --git a/src/sys/external/bsd/drm2/include/linux/spinlock.h b/src/sys/external/bsd/drm2/include/linux/spinlock.h
index 7452f694..0fb18f23 100644
--- a/src/sys/external/bsd/drm2/include/linux/spinlock.h
+++ b/src/sys/external/bsd/drm2/include/linux/spinlock.h
@@ -139,6 +139,26 @@ rwlock_destroy(rwlock_t *rw)
 	mutex_destroy(&rw->rw_lock);
 }
 
+static inline void
+write_lock(rwlock_t *rw)
+{
+
+	for (;;) {
+		mutex_spin_enter(&rw->rw_lock);
+		if (rw->rw_nreaders == 0)
+			break;
+		mutex_spin_exit(&rw->rw_lock);
+	}
+}
+
+static inline void
+write_unlock(rwlock_t *rw)
+{
+
+	KASSERT(rw->rw_nreaders == 0);
+	mutex_spin_exit(&rw->rw_lock);
+}
+
 static inline void
 write_lock_irq(rwlock_t *rw)
 {
diff --git a/src/sys/external/bsd/drm2/include/linux/wait.h b/src/sys/external/bsd/drm2/include/linux/wait.h
index 84b4f23b..6ecc010a 100644
--- a/src/sys/external/bsd/drm2/include/linux/wait.h
+++ b/src/sys/external/bsd/drm2/include/linux/wait.h
@@ -32,4 +32,10 @@
 #ifndef _LINUX_WAIT_H_
 #define _LINUX_WAIT_H_
 
+#include <sys/condvar.h>
+
+typedef kcondvar_t wait_queue_head_t;
+
+#define wake_up_all(x) cv_broadcast(x)
+
 #endif  /* _LINUX_WAIT_H_ */
diff --git a/src/sys/external/bsd/drm2/pci/files.drmkms_pci b/src/sys/external/bsd/drm2/pci/files.drmkms_pci
index ce6058d2..ac83629f 100644
--- a/src/sys/external/bsd/drm2/pci/files.drmkms_pci
+++ b/src/sys/external/bsd/drm2/pci/files.drmkms_pci
@@ -15,3 +15,4 @@ include "external/bsd/drm2/i915drm/files.i915drmkms"
 include "external/bsd/drm2/radeon/files.radeon"
 include "external/bsd/drm2/nouveau/files.nouveau"
 include "external/bsd/drm2/via/files.via"
+include "external/bsd/drm2/vmwgfx/files.vmwgfx"
diff --git a/src/sys/external/bsd/drm2/ttm/files.ttm b/src/sys/external/bsd/drm2/ttm/files.ttm
index 2642b843..2e0011ec 100644
--- a/src/sys/external/bsd/drm2/ttm/files.ttm
+++ b/src/sys/external/bsd/drm2/ttm/files.ttm
@@ -14,10 +14,8 @@ file	external/bsd/drm2/dist/drm/ttm/ttm_bo_util.c		drmkms_ttm
 file	external/bsd/drm2/ttm/ttm_bo_vm.c			drmkms_ttm
 # Linux module goo.
 #file	external/bsd/drm2/dist/drm/ttm/ttm_module.c		drmkms_ttm
-# Used only by vmwgfx.  Needs porting for rcu -> pserialize.
-#file	external/bsd/drm2/dist/drm/ttm/ttm_object.c		drmkms_ttm
-# Used only by vmwgfx.  Needs porting.  Does silly things like SIGKILL.
-#file	external/bsd/drm2/dist/drm/ttm/ttm_lock.c		drmkms_ttm
+file	external/bsd/drm2/dist/drm/ttm/ttm_object.c		drmkms_ttm
+file	external/bsd/drm2/ttm/ttm_lock.c		drmkms_ttm
 file	external/bsd/drm2/dist/drm/ttm/ttm_execbuf_util.c	drmkms_ttm
 # Replaced locally by ttm_bus_dma.c.
 #file	external/bsd/drm2/dist/drm/ttm/ttm_page_alloc.c		drmkms_ttm
diff --git a/src/sys/external/bsd/drm2/ttm/ttm_lock.c b/src/sys/external/bsd/drm2/ttm/ttm_lock.c
new file mode 100644
index 00000000..860ed092
--- /dev/null
+++ b/src/sys/external/bsd/drm2/ttm/ttm_lock.c
@@ -0,0 +1,245 @@
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/condvar.h>
+#include <sys/errno.h>
+#include <sys/mutex.h>
+#include <sys/signal.h>
+
+#include <drm/ttm/ttm_lock.h>
+
+#define TTM_WRITE_LOCK_PENDING    (1 << 0)
+#define TTM_VT_LOCK_PENDING       (1 << 1)
+#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
+#define TTM_VT_LOCK               (1 << 3)
+#define TTM_SUSPEND_LOCK          (1 << 4)
+
+void
+ttm_lock_init(struct ttm_lock *lock)
+{
+    mutex_init(&lock->lock, MUTEX_SPIN, IPL_NONE);
+    cv_init(&lock->cv, "ttm_lock");
+    lock->rw = 0;
+    lock->flags = 0;
+    lock->kill_takers = false;
+    lock->signal = SIGKILL;
+}
+
+void
+ttm_read_unlock(struct ttm_lock *lock)
+{
+    mutex_enter(&lock->lock);
+    --lock->rw;    /* drop one reader; other readers may still hold the lock */
+    cv_broadcast(&lock->cv);
+    mutex_exit(&lock->lock);
+}
+
+static int
+handle_kill_takers(struct ttm_lock *lock)
+{
+    KASSERT(lock->kill_takers);
+
+    /* TODO: should we really send the signal to curproc? */
+
+    mutex_exit(&lock->lock);
+
+    return -EBADF;
+}
+
+int
+ttm_read_lock(struct ttm_lock *lock, bool interruptible)
+{
+    bool locked = false;
+
+    mutex_enter(&lock->lock);
+
+    while (!locked) {
+        if (__predict_false(lock->kill_takers)) {
+            return handle_kill_takers(lock);
+        }
+        if (lock->rw >= 0 && lock->flags == 0) {
+            ++lock->rw;
+            locked = true;
+        } else if (interruptible) {
+            int err = cv_wait_sig(&lock->cv, &lock->lock);
+            if (err) {
+                break;
+            }
+        } else {
+            cv_wait(&lock->cv, &lock->lock);
+        }
+    }
+    mutex_exit(&lock->lock);
+    return locked ? 0 : -ERESTART;
+}
+
+int
+ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
+{
+    bool locked = false;
+    bool lockable = true;
+
+    mutex_enter(&lock->lock);
+
+    while (!locked && lockable) {
+        if (__predict_false(lock->kill_takers)) {
+            return handle_kill_takers(lock);
+        }
+        if (lock->rw >= 0 && lock->flags == 0) {
+            ++lock->rw;
+            locked = true;
+        } else if (lock->flags == 0) {
+            lockable = false;
+        } else if (interruptible) {
+            int err = cv_wait_sig(&lock->cv, &lock->lock);
+            if (err) {
+                break;
+            }
+        } else {
+            cv_wait(&lock->cv, &lock->lock);
+        }
+    }
+    mutex_exit(&lock->lock);
+    if (locked) {
+        return 0;
+    } else if (lockable) {
+        return -ERESTART;
+    } else {
+        return -EBUSY;
+    }
+}
+
+void
+ttm_write_unlock(struct ttm_lock *lock)
+{
+    mutex_enter(&lock->lock);
+    lock->rw = 0;
+    cv_broadcast(&lock->cv);
+    mutex_exit(&lock->lock);
+}
+
+int
+ttm_write_lock(struct ttm_lock *lock, bool interruptible)
+{
+    bool locked = false;
+
+    mutex_enter(&lock->lock);
+
+    while (!locked) {
+        if (__predict_false(lock->kill_takers)) {
+            return handle_kill_takers(lock);
+        }
+        if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
+            lock->rw = -1;
+            lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+            locked = true;
+        } else {
+            lock->flags |= TTM_WRITE_LOCK_PENDING;
+            if (interruptible) {
+                int err = cv_wait_sig(&lock->cv, &lock->lock);
+                if (err) {
+                    lock->flags &= ~TTM_WRITE_LOCK_PENDING;
+                    cv_broadcast(&lock->cv);
+                    break;
+                }
+            } else {
+                cv_wait(&lock->cv, &lock->lock);
+            }
+        }
+    }
+    mutex_exit(&lock->lock);
+    return locked ? 0 : -ERESTART;
+}
+
+void
+ttm_suspend_lock(struct ttm_lock *lock)
+{
+    bool locked = false;
+
+    mutex_enter(&lock->lock);
+
+    while (!locked) {
+        if (lock->rw == 0) {
+            lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
+            lock->flags |= TTM_SUSPEND_LOCK;
+            locked = true;
+        } else {
+            lock->flags |= TTM_SUSPEND_LOCK_PENDING;
+            cv_wait(&lock->cv, &lock->lock);
+        }
+    }
+    mutex_exit(&lock->lock);
+}
+
+void
+ttm_suspend_unlock(struct ttm_lock *lock)
+{
+    mutex_enter(&lock->lock);
+    lock->flags &= ~TTM_SUSPEND_LOCK;
+    cv_broadcast(&lock->cv);
+    mutex_exit(&lock->lock);
+}
+
+static void
+ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+	struct ttm_base_object *base = *p_base;
+	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+
+	*p_base = NULL;
+    mutex_enter(&lock->lock);
+    KASSERT((lock->flags & TTM_VT_LOCK) != 0);
+    lock->flags &= ~TTM_VT_LOCK;
+    cv_broadcast(&lock->cv);
+    mutex_exit(&lock->lock);
+}
+
+int
+ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
+		       struct ttm_object_file *tfile)
+{
+    bool locked = false;
+
+    mutex_enter(&lock->lock);
+
+    while (!locked) {
+        if (lock->rw == 0) {
+            lock->flags &= ~TTM_VT_LOCK_PENDING;
+            lock->flags |= TTM_VT_LOCK;
+            locked = true;
+        } else {
+            lock->flags |= TTM_VT_LOCK_PENDING;
+            if (interruptible) {
+                int err = cv_wait_sig(&lock->cv, &lock->lock);
+                if (err) {
+                    lock->flags &= ~TTM_VT_LOCK_PENDING;
+                    cv_broadcast(&lock->cv);
+                    break;
+                }
+            } else {
+                cv_wait(&lock->cv, &lock->lock);
+            }
+        }
+    }
+    mutex_exit(&lock->lock);
+    if (locked) {
+        int err = ttm_base_object_init(tfile, &lock->base, false,
+                ttm_lock_type, &ttm_vt_lock_remove, NULL);
+        if (err) {
+            mutex_enter(&lock->lock);
+            lock->flags &= ~TTM_VT_LOCK;
+            cv_broadcast(&lock->cv);
+            mutex_exit(&lock->lock);
+        } else
+            lock->vt_holder = tfile;
+        return err;
+    } else {
+        return -ERESTART;
+    }
+}
+
+int
+ttm_vt_unlock(struct ttm_lock *lock)
+{
+    return ttm_ref_object_base_unref(lock->vt_holder, lock->base.hash.key,
+            TTM_REF_USAGE);
+}
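
The replacement ttm_lock.c above keeps the original state rules: rw counts
readers, rw == -1 means a writer holds the lock, and the pending flags keep
new readers out while a writer (or the VT/suspend paths) is queued. A
compact single-threaded model of just those rules, with made-up demo_*
names, to show why the flags matter:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_WRITE_PENDING	0x1

struct demo_ttm_lock {
	int rw;			/* >0 readers, -1 writer, 0 free */
	unsigned flags;
};

static bool
demo_try_read(struct demo_ttm_lock *l)
{

	if (l->rw >= 0 && l->flags == 0) {
		l->rw++;
		return true;
	}
	return false;		/* the real code cv_wait()s and retries */
}

static bool
demo_try_write(struct demo_ttm_lock *l)
{

	if (l->rw == 0 && (l->flags & ~DEMO_WRITE_PENDING) == 0) {
		l->rw = -1;
		l->flags &= ~DEMO_WRITE_PENDING;
		return true;
	}
	l->flags |= DEMO_WRITE_PENDING;	/* block new readers until the writer runs */
	return false;
}

int
main(void)
{
	struct demo_ttm_lock l = { 0, 0 };

	printf("reader: %d\n", demo_try_read(&l));	/* 1: granted */
	printf("writer: %d\n", demo_try_write(&l));	/* 0: a reader is active */
	printf("reader: %d\n", demo_try_read(&l));	/* 0: write pending */
	l.rw--;						/* last reader unlocks */
	printf("writer: %d\n", demo_try_write(&l));	/* 1: granted */
	return 0;
}
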
diff --git a/src/sys/external/bsd/drm2/vmwgfx/files.vmwgfx b/src/sys/external/bsd/drm2/vmwgfx/files.vmwgfx
new file mode 100644
index 00000000..5984c56f
--- /dev/null
+++ b/src/sys/external/bsd/drm2/vmwgfx/files.vmwgfx
@@ -0,0 +1,51 @@
+#	$NetBSD$
+
+version	20180827
+
+define	vmwgfxfbbus { }
+device	vmwgfx: drmkms, drmkms_pci, vmwgfxfbbus
+attach	vmwgfx at pci
+
+device	vmwgfxfb: vmwgfxfbbus, drmfb, drmfb_pci, wsemuldisplaydev
+attach	vmwgfxfb at vmwgfxfbbus
+
+makeoptions	vmwgfx	CPPFLAGS+="-I$S/external/bsd/drm2/vmwgfx"
+
+makeoptions	vmwgfx	CPPFLAGS+="-I$S/external/bsd/drm2/dist/drm/vmwgfx"
+
+makeoptions	vmwgfx	"CWARNFLAGS.vmwgfx"+="-Wno-cast-qual -Wno-shadow -Wno-unused-but-set-variable -Wno-missing-field-initializers"
+
+file	external/bsd/drm2/vmwgfx/vmwgfx_module.c		vmwgfx
+file	external/bsd/drm2/vmwgfx/vmwgfx_pci.c		vmwgfx
+
+file	external/bsd/drm2/vmwgfx/vmwgfxfb.c		vmwgfxfb
+
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_binding.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_buffer.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cmdbuf_res.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_context.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_cotable.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_dmabuf.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_drv.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_execbuf.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fb.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fence.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_fifo.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_gmr.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_gmrid_manager.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ioctl.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_irq.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_kms.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ldu.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_marker.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_mob.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_overlay.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_prime.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_resource.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_scrn.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_shader.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_so.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_stdu.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_surface.c	vmwgfx
+file	external/bsd/drm2/dist/drm/vmwgfx/vmwgfx_ttm_glue.c	vmwgfx
diff --git a/src/sys/external/bsd/drm2/vmwgfx/vmwgfx_module.c b/src/sys/external/bsd/drm2/vmwgfx/vmwgfx_module.c
new file mode 100644
index 00000000..e1bc61fe
--- /dev/null
+++ b/src/sys/external/bsd/drm2/vmwgfx/vmwgfx_module.c
@@ -0,0 +1,93 @@
+#include <sys/cdefs.h>
+
+#include <sys/types.h>
+#include <sys/module.h>
+#ifndef _MODULE
+#include <sys/once.h>
+#endif
+#include <sys/systm.h>
+
+#include <drm/drmP.h>
+
+MODULE(MODULE_CLASS_DRIVER, vmwgfx, "drmkms,drmkms_pci");
+
+#ifdef _MODULE
+#include "ioconf.c"
+#endif
+
+static int
+vmwgfx_init(void)
+{
+	int error;
+
+	error = drm_guarantee_initialized();
+	if (error)
+		return error;
+
+	return 0;
+}
+
+int	vmwgfx_guarantee_initialized(void); /* XXX */
+int
+vmwgfx_guarantee_initialized(void)
+{
+#ifdef _MODULE
+	return 0;
+#else
+	static ONCE_DECL(vmwgfx_init_once);
+
+	return RUN_ONCE(&vmwgfx_init_once, &vmwgfx_init);
+#endif
+}
+
+static void
+vmwgfx_fini(void)
+{
+}
+
+static int
+vmwgfx_modcmd(modcmd_t cmd, void *arg __unused)
+{
+	int error;
+
+	switch (cmd) {
+	case MODULE_CMD_INIT:
+#ifdef _MODULE
+		error = vmwgfx_init();
+#else
+		error = vmwgfx_guarantee_initialized();
+#endif
+		if (error) {
+			aprint_error("vmwgfx: failed to initialize: %d\n", error);
+			return error;
+		}
+#ifdef _MODULE
+		error = config_init_component(cfdriver_ioconf_vmwgfx,
+		    cfattach_ioconf_vmwgfx, cfdata_ioconf_vmwgfx);
+		if (error) {
+			aprint_error("vmwgfx: failed to init component"
+			    ": %d\n", error);
+			vmwgfx_fini();
+			return error;
+		}
+#endif
+		return 0;
+
+	case MODULE_CMD_FINI:
+#ifdef _MODULE
+		error = config_fini_component(cfdriver_ioconf_vmwgfx,
+		    cfattach_ioconf_vmwgfx, cfdata_ioconf_vmwgfx);
+		if (error) {
+			aprint_error("vmwgfx: failed to fini component"
+			    ": %d\n", error);
+			return error;
+		}
+#endif
+
+		vmwgfx_fini();
+		return 0;
+
+	default:
+		return ENOTTY;
+	}
+}
diff --git a/src/sys/external/bsd/drm2/vmwgfx/vmwgfx_pci.c b/src/sys/external/bsd/drm2/vmwgfx/vmwgfx_pci.c
new file mode 100644
index 00000000..4d53b116
--- /dev/null
+++ b/src/sys/external/bsd/drm2/vmwgfx/vmwgfx_pci.c
@@ -0,0 +1,85 @@
+#include <sys/cdefs.h>
+
+#include <sys/types.h>
+#include <sys/device.h>
+#include <sys/systm.h>
+
+#include <linux/pci.h>
+
+#include <drm/drmP.h>
+
+struct vmwgfx_softc {
+    device_t sc_dev;
+    struct pci_dev sc_pci_dev;
+    struct drm_device *sc_drm_dev;
+};
+
+static int vmwgfx_match(device_t, cfdata_t, void *);
+static void vmwgfx_attach(device_t, device_t, void *);
+static int vmwgfx_detach(device_t, int);
+
+extern struct drm_driver *const vmwgfx_drm_driver;  /* XXX */
+
+CFATTACH_DECL_NEW(vmwgfx, sizeof(struct vmwgfx_softc),
+    vmwgfx_match, vmwgfx_attach, vmwgfx_detach, NULL);
+
+static int
+vmwgfx_match(device_t parent, cfdata_t match, void *aux)
+{
+    extern int vmwgfx_guarantee_initialized(void);
+    const struct pci_attach_args *const pa = aux;
+    int error;
+
+    error = vmwgfx_guarantee_initialized();
+    if (error) {
+        aprint_error("vmwgfx: failed to initialize: %d\n", error);
+        return 0;
+    }
+
+    /* Match only the VMware SVGA II display adapter. */
+    if (PCI_VENDOR(pa->pa_id) != 0x15AD)
+        return 0;
+    if (PCI_PRODUCT(pa->pa_id) != 0x0405)
+        return 0;
+
+    return 10;
+}
+
+static void
+vmwgfx_attach(device_t parent, device_t self, void *aux)
+{
+    struct vmwgfx_softc *const sc = device_private(self);
+    const struct pci_attach_args *const pa = aux;
+    int error;
+
+    pci_aprint_devinfo(pa, NULL);
+
+    linux_pci_dev_init(&sc->sc_pci_dev, self, device_parent(self), pa, 0);
+
+    error = -drm_pci_attach(self, pa, &sc->sc_pci_dev, vmwgfx_drm_driver,
+            0, &sc->sc_drm_dev);
+    if (error) {
+        aprint_error_dev(self, "unable to attach drm: %d\n", error);
+        return;
+    }
+}
+
+static int
+vmwgfx_detach(device_t self, int flags)
+{
+    struct vmwgfx_softc *const sc = device_private(self);
+    int error;
+
+    error = config_detach_children(self, flags);
+    if (error)
+        return error;
+    if (sc->sc_drm_dev == NULL)
+        goto out;
+    error = -drm_pci_detach(sc->sc_drm_dev, flags);
+    if (error)
+        return error;
+    sc->sc_drm_dev = NULL;
+
+out:
+    linux_pci_dev_destroy(&sc->sc_pci_dev);
+    return 0;
+}
diff --git a/src/sys/external/bsd/drm2/vmwgfx/vmwgfxfb.c b/src/sys/external/bsd/drm2/vmwgfx/vmwgfxfb.c
new file mode 100644
index 00000000..201417af
--- /dev/null
+++ b/src/sys/external/bsd/drm2/vmwgfx/vmwgfxfb.c
@@ -0,0 +1,151 @@
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/device.h>
+
+#include <drm/drmP.h>
+#include <drm/drmfb.h>
+#include <drm/drmfb_pci.h>
+
+#include "vmwgfx_drv.h"
+#include "vmwgfxfb.h"
+
+static int vmwgfxfb_match(device_t, cfdata_t, void *);
+static void vmwgfxfb_attach(device_t, device_t, void *);
+static int vmwgfxfb_detach(device_t, int);
+
+static paddr_t vmwgfxfb_drmfb_mmapfb(struct drmfb_softc *, off_t, int);
+
+static void vmwgfxfb_drmfb_dirty(struct drmfb_softc *);
+static void vmwgfxfb_do_dirty(void *);
+
+struct vmwgfxfb_softc {
+	struct drmfb_softc sc_drmfb;
+	struct drm_device *drm_dev;
+	struct vmw_dma_buffer *fb_bo;
+	void *fb_ptr;
+	bool drmfb_attached;
+	struct drm_fb_helper *drm_fb_helper;
+	struct drm_fb_helper_surface_size fb_sizes;
+	void *dirty_softint;
+};
+
+static const struct drmfb_params vmwgfxfb_drmfb_params = {
+	.dp_mmapfb = vmwgfxfb_drmfb_mmapfb,
+	.dp_mmap = drmfb_pci_mmap,
+	.dp_ioctl = drmfb_pci_ioctl,
+	.dp_is_vga_console = drmfb_pci_is_vga_console,
+	.dp_dirty = vmwgfxfb_drmfb_dirty,
+};
+
+CFATTACH_DECL_NEW(vmwgfxfb, sizeof(struct vmwgfxfb_softc),
+    vmwgfxfb_match, vmwgfxfb_attach, vmwgfxfb_detach, NULL);
+
+static int
+vmwgfxfb_match(device_t parent, cfdata_t match, void *aux)
+{
+
+	return 1;
+}
+
+static void
+vmwgfxfb_attach(device_t parent, device_t self, void *aux)
+{
+	struct vmwgfxfb_softc *const sc = device_private(self);
+	const struct vmwgfxfb_attach_args *const ifa = aux;
+	int error;
+	struct drmfb_attach_args da;
+
+	sc->dirty_softint = softint_establish(SOFTINT_SERIAL | SOFTINT_MPSAFE,
+	    vmwgfxfb_do_dirty, sc);
+
+	sc->drm_dev = ifa->drm_dev;
+	sc->fb_bo = ifa->fb_bo;
+	sc->drm_fb_helper = ifa->drm_fb_helper;
+	sc->fb_sizes = ifa->fb_sizes;
+	sc->drmfb_attached = false;
+	sc->fb_ptr = ifa->fb_ptr;
+
+	aprint_naive("\n");
+	aprint_normal("\n");
+
+	da.da_dev = self;
+	da.da_fb_helper = sc->drm_fb_helper;
+	da.da_fb_sizes = &sc->fb_sizes;
+	da.da_fb_vaddr = sc->fb_ptr;
+	da.da_fb_linebytes = sc->fb_sizes.surface_width *
+	    howmany(sc->fb_sizes.surface_bpp, 8);
+	da.da_params = &vmwgfxfb_drmfb_params;
+
+	device_printf(self, "framebuffer at %p\n", sc->fb_ptr);
+
+	error = drmfb_attach(&sc->sc_drmfb, &da);
+	if (error) {
+		aprint_error_dev(self, "failed to attach drmfb: %d\n", error);
+		return;
+	}
+	sc->drmfb_attached = true;
+}
+
+static int
+vmwgfxfb_detach(device_t self, int flags)
+{
+	struct vmwgfxfb_softc *const sc = device_private(self);
+	int error;
+
+	if (sc->drmfb_attached) {
+		error = drmfb_detach(&sc->sc_drmfb, flags);
+		if (error) {
+			return error;
+		}
+		sc->drmfb_attached = false;
+	}
+
+	softint_disestablish(sc->dirty_softint);
+
+	return 0;
+}
+
+static paddr_t
+vmwgfxfb_drmfb_mmapfb(struct drmfb_softc *drmfb, off_t offset, int prot)
+{
+	struct vmwgfxfb_softc *const sc = container_of(drmfb,
+	    struct vmwgfxfb_softc, sc_drmfb);
+
+	KASSERT(0 <= offset);
+
+	/*
+	 * TODO: this does not work as is, because writes to the mapped
+	 * memory do not become visible until the host is told that the
+	 * memory is dirty; nothing triggers that flush for userland mmaps.
+	 */
+
+	return bus_space_mmap(sc->drm_dev->bst, sc->fb_bo->base.mem.bus.base,
+	    sc->fb_bo->base.mem.bus.offset + offset,
+	    prot, BUS_SPACE_MAP_PREFETCHABLE);
+}
+
+static void
+vmwgfxfb_drmfb_dirty(struct drmfb_softc *drmfb)
+{
+	/*
+	 * XXX The dirty notification is currently compiled out.  When
+	 * enabled, it flushes synchronously while still cold (softints
+	 * are not running yet) and defers to the dirty softint afterwards.
+	 */
+#if 0
+	struct vmwgfxfb_softc *const sc = container_of(drmfb,
+	    struct vmwgfxfb_softc, sc_drmfb);
+
+	if (cold) {
+		vmw_fb_dirty(sc->drm_dev->dev_private);
+		return;
+	}
+
+	softint_schedule(sc->dirty_softint);
+#endif
+}
+
+static void
+vmwgfxfb_do_dirty(void *cookie)
+{
+	struct vmwgfxfb_softc *sc = cookie;
+
+	vmw_fb_dirty(sc->drm_dev->dev_private);
+}
diff --git a/src/sys/external/bsd/drm2/vmwgfx/vmwgfxfb.h b/src/sys/external/bsd/drm2/vmwgfx/vmwgfxfb.h
new file mode 100644
index 00000000..3e115e7a
--- /dev/null
+++ b/src/sys/external/bsd/drm2/vmwgfx/vmwgfxfb.h
@@ -0,0 +1,19 @@
+#ifndef _VMWGFX_VMWGFXFB_H_
+#define _VMWGFX_VMWGFXFB_H_
+
+#include <sys/bus.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+
+#include "vmwgfx_drv.h"
+
+struct vmwgfxfb_attach_args {
+    struct drm_device *drm_dev;
+    struct drm_fb_helper *drm_fb_helper;
+    struct drm_fb_helper_surface_size fb_sizes;
+    struct vmw_dma_buffer *fb_bo;
+    void *fb_ptr;
+};
+
+#endif	/* _VMWGFX_VMWGFXFB_H_ */

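Regarding the TODO in vmwgfxfb_drmfb_mmapfb: until there is real dirty
tracking for userland mappings, one crude workaround might be a periodic
callout that keeps kicking the dirty softint. This is an untested sketch;
the sc_dirty_tick member is a hypothetical addition to struct
vmwgfxfb_softc, the rest only reuses what the patch already sets up:

#include <sys/callout.h>
#include <sys/kernel.h>		/* for hz */

	/* hypothetical new member in struct vmwgfxfb_softc */
	struct callout sc_dirty_tick;

static void
vmwgfxfb_dirty_tick(void *cookie)
{
	struct vmwgfxfb_softc *sc = cookie;

	/* Defer the actual flush to the dirty softint, then re-arm. */
	softint_schedule(sc->dirty_softint);
	callout_schedule(&sc->sc_dirty_tick, hz / 10);
}

/* in vmwgfxfb_attach(), after softint_establish(): */
	callout_init(&sc->sc_dirty_tick, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_dirty_tick, vmwgfxfb_dirty_tick, sc);
	callout_schedule(&sc->sc_dirty_tick, hz);

/* in vmwgfxfb_detach(), before softint_disestablish(): */
	callout_halt(&sc->sc_dirty_tick, NULL);
	callout_destroy(&sc->sc_dirty_tick);

This would only poll, of course, and flush even when nothing was written;
a fault-based scheme would be needed to do it properly.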
