Source-Changes-HG archive


[src/riastradh-drm2]: src/sys Adapt the i915 GEM code to NetBSD.



details:   https://anonhg.NetBSD.org/src/rev/ac0c6ec8934d
branches:  riastradh-drm2
changeset: 788526:ac0c6ec8934d
user:      riastradh <riastradh@NetBSD.org>
date:      Sun Sep 08 15:52:20 2013 +0000

description:
Adapt the i915 GEM code to NetBSD.

diffstat:

 sys/external/bsd/drm2/dist/drm/i915/i915_drv.h            |   30 +-
 sys/external/bsd/drm2/dist/drm/i915/i915_gem.c            |  510 +++++++++++++-
 sys/external/bsd/drm2/dist/drm/i915/i915_gem_execbuffer.c |   39 +-
 sys/external/bsd/drm2/dist/drm/i915/i915_gem_tiling.c     |   33 +
 sys/external/bsd/drm2/i915drm/i915_gem.c                  |  448 ------------
 sys/external/bsd/drm2/i915drm/i915_gem_gtt.c              |  432 +++++++++++
 sys/external/bsd/drm2/i915drm/intel_gtt.c                 |   48 +
 sys/modules/i915drm2/Makefile                             |   19 +-
 8 files changed, 1096 insertions(+), 463 deletions(-)

diffs (truncated from 2130 to 300 lines):

diff -r 0ddfad7ee0e1 -r ac0c6ec8934d sys/external/bsd/drm2/dist/drm/i915/i915_drv.h
--- a/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h    Sun Sep 08 15:47:17 2013 +0000
+++ b/sys/external/bsd/drm2/dist/drm/i915/i915_drv.h    Sun Sep 08 15:52:20 2013 +0000
@@ -1084,7 +1084,14 @@
        unsigned int has_global_gtt_mapping:1;
        unsigned int has_dma_mapping:1;
 
+#ifdef __NetBSD__
+       struct pglist igo_pageq;
+       bus_dma_segment_t *pages; /* `pages' is an expedient misnomer.  */
+       int igo_nsegs;
+       bus_dmamap_t igo_dmamap;
+#else
        struct sg_table *pages;
+#endif
        int pages_pin_count;
 
        /* prime dma-buf support */
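
On NetBSD there is no struct sg_table, so the object's backing storage
is instead described by a UVM page queue plus bus_dma(9) segments and a
DMA map, as the new fields above show.  A minimal sketch of how such a
map might be created and loaded; the helper name and error handling are
assumptions, not code from this changeset:

    /*
     * Hypothetical helper: describe the object's backing pages to
     * bus_dma(9) so the GPU can address them.
     */
    static int
    igo_load_dmamap(bus_dma_tag_t dmat, struct drm_i915_gem_object *obj,
        bus_size_t size)
    {
            int error;

            /* One segment per page is the worst case.  */
            error = bus_dmamap_create(dmat, size, obj->igo_nsegs,
                PAGE_SIZE, 0, BUS_DMA_WAITOK, &obj->igo_dmamap);
            if (error)
                    return error;

            /* Load the raw segments gathered at allocation time.  */
            error = bus_dmamap_load_raw(dmat, obj->igo_dmamap,
                obj->pages, obj->igo_nsegs, size, BUS_DMA_WAITOK);
            if (error)
                    bus_dmamap_destroy(dmat, obj->igo_dmamap);
            return error;
    }
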
@@ -1436,7 +1443,23 @@
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-#ifndef __NetBSD__             /* XXX */
+#ifdef __NetBSD__              /* XXX */
+static inline struct page *
+i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+
+       /*
+        * Pages must be pinned so that we need not hold the lock to
+        * prevent them from disappearing.
+        */
+       KASSERT(obj->pages != NULL);
+       mutex_enter(obj->base.gemo_uvmobj.vmobjlock);
+       struct vm_page *const page = uvm_pagelookup(obj->base.gemo_shm_uao, n);
+       mutex_exit(obj->base.gemo_uvmobj.vmobjlock);
+
+       return container_of(page, struct page, p_vmp);
+}
+#else
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
        struct scatterlist *sg = obj->pages->sgl;
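
The NetBSD variant above leans on the drm2 compatibility layer's
struct page embedding the native vm_page (the p_vmp member named in the
container_of() call).  Roughly, as an assumption about the shim rather
than code from this changeset:

    /* Sketch of the compat type assumed by container_of() above.  */
    struct page {
            struct vm_page p_vmp;   /* the native UVM page */
            /* possibly further compatibility fields */
    };
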
@@ -1534,7 +1557,10 @@
                     u32 *seqno);
 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
-#ifndef __NetBSD__             /* XXX */
+#ifdef __NetBSD__              /* XXX */
+int i915_gem_fault(struct uvm_faultinfo *, vaddr_t, struct vm_page **,
+    int, int, vm_prot_t, int);
+#else
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 #endif
 int __must_check
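
The NetBSD i915_gem_fault() prototype matches UVM's pager fault hook,
so it is presumably wired up through a uvm_pagerops table rather than
Linux's vm_operations_struct.  A sketch, with the ops-table name being
a guess:

    /*
     * Assumed hookup: UVM calls pgo_fault on a page fault against
     * a mapping of the GEM object.
     */
    static const struct uvm_pagerops i915_gem_uvm_ops = {
            /* other pager hooks elided */
            .pgo_fault = i915_gem_fault,
    };
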
diff -r 0ddfad7ee0e1 -r ac0c6ec8934d sys/external/bsd/drm2/dist/drm/i915/i915_gem.c
--- a/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c    Sun Sep 08 15:47:17 2013 +0000
+++ b/sys/external/bsd/drm2/dist/drm/i915/i915_gem.c    Sun Sep 08 15:52:20 2013 +0000
@@ -25,6 +25,21 @@
  *
  */
 
+#ifdef __NetBSD__
+#if 0                          /* XXX uvmhist option?  */
+#include "opt_uvmhist.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/param.h>
+
+#include <uvm/uvm.h>
+#include <uvm/uvm_fault.h>
+#include <uvm/uvm_page.h>
+#include <uvm/uvm_pmap.h>
+#include <uvm/uvm_prot.h>
+#endif
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -350,6 +365,9 @@
                 char __user *user_data,
                 bool page_do_bit17_swizzling, bool needs_clflush)
 {
+#ifdef __NetBSD__              /* XXX atomic shmem fast path */
+       return -EFAULT;
+#else
        char *vaddr;
        int ret;
 
@@ -366,6 +384,7 @@
        kunmap_atomic(vaddr);
 
        return ret ? -EFAULT : 0;
+#endif
 }
 
 static void
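
Returning -EFAULT unconditionally above sends every pread through the
slow path: the Linux fast path depends on kmap_atomic() and a user copy
that must not fault, and no NetBSD equivalent is wired up yet.  The
slow path may sleep, so a plain mapping plus copyout(9) suffices there.
A minimal sketch, assuming the compat kmap()/kunmap() shims; the helper
name is made up:

    static int
    pread_slow_sketch(struct page *page, int offset, int length,
        char __user *user_data)
    {
            char *vaddr = kmap(page);       /* may sleep */
            int error = copyout(vaddr + offset, user_data, length);

            kunmap(page);
            return error ? -EFAULT : 0;
    }
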
@@ -431,10 +450,14 @@
        int shmem_page_offset, page_length, ret = 0;
        int obj_do_bit17_swizzling, page_do_bit17_swizzling;
        int hit_slowpath = 0;
+#ifndef __NetBSD__             /* XXX */
        int prefaulted = 0;
+#endif
        int needs_clflush = 0;
+#ifndef __NetBSD__
        struct scatterlist *sg;
        int i;
+#endif
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
@@ -463,6 +486,50 @@
 
        offset = args->offset;
 
+#ifdef __NetBSD__
+       /*
+        * XXX This is a big #ifdef with a lot of duplicated code, but
+        * factoring out the loop head -- which is all that
+        * substantially differs -- is probably more trouble than it's
+        * worth at the moment.
+        */
+       while (0 < remain) {
+               /* Get the next page.  */
+               shmem_page_offset = offset_in_page(offset);
+               KASSERT(shmem_page_offset < PAGE_SIZE);
+               page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+               struct page *const page = i915_gem_object_get_page(obj,
+                   (offset & ~(PAGE_SIZE-1)));
+
+               /* Decide whether to swizzle bit 17.  */
+               page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+                   (page_to_phys(page) & (1 << 17)) != 0;
+
+               /* Try the fast path.  */
+               ret = shmem_pread_fast(page, shmem_page_offset, page_length,
+                   user_data, page_do_bit17_swizzling, needs_clflush);
+               if (ret == 0)
+                       goto next_page;
+
+               /* Fast path failed.  Try the slow path.  */
+               hit_slowpath = 1;
+               mutex_unlock(&dev->struct_mutex);
+               /* XXX prefault */
+               ret = shmem_pread_slow(page, shmem_page_offset, page_length,
+                   user_data, page_do_bit17_swizzling, needs_clflush);
+               mutex_lock(&dev->struct_mutex);
+
+next_page:
+               /* XXX mark page accessed */
+               if (ret)
+                       goto out;
+
+               KASSERT(page_length <= remain);
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+#else
        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
                struct page *page;
 
@@ -521,6 +588,7 @@
                user_data += page_length;
                offset += page_length;
        }
+#endif
 
 out:
        i915_gem_object_unpin_pages(obj);
@@ -572,6 +640,7 @@
                goto out;
        }
 
+#ifndef __NetBSD__             /* XXX drm prime */
        /* prime objects have no backing filp to GEM pread/pwrite
         * pages from.
         */
@@ -579,6 +648,7 @@
                ret = -EINVAL;
                goto out;
        }
+#endif
 
        trace_i915_gem_object_pread(obj, args->offset, args->size);
 
@@ -601,6 +671,9 @@
                char __user *user_data,
                int length)
 {
+#ifdef __NetBSD__              /* XXX atomic shmem fast path */
+       return -EFAULT;
+#else
        void __iomem *vaddr_atomic;
        void *vaddr;
        unsigned long unwritten;
@@ -612,6 +685,7 @@
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr_atomic);
        return unwritten;
+#endif
 }
 
 /**
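
Like the shmem fast paths, the GTT pwrite fast path is stubbed out
because it relies on io_mapping_map_atomic_wc(), for which no NetBSD
shim exists yet.  A non-atomic substitute could bounce through a kernel
buffer and bus_space(9).  A rough sketch, where every name and the
bounce-buffer size are assumptions:

    static int
    gtt_pwrite_sketch(bus_space_tag_t bst, bus_addr_t aperture_base,
        off_t page_base, const char __user *user_data, int length)
    {
            bus_space_handle_t bsh;
            char buf[256];  /* a real version would loop or allocate */
            int error;

            if (length < 0 || length > sizeof(buf))
                    return -EINVAL;
            error = copyin(user_data, buf, length); /* plain +errno */
            if (error)
                    return -error;
            if (bus_space_map(bst, aperture_base + page_base, length,
                0, &bsh))
                    return -EFAULT;
            bus_space_write_region_1(bst, bsh, 0,
                (const uint8_t *)buf, length);
            bus_space_unmap(bst, bsh, length);
            return 0;
    }
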
@@ -692,6 +766,9 @@
                  bool needs_clflush_before,
                  bool needs_clflush_after)
 {
+#ifdef __NetBSD__
+       return -EFAULT;
+#else
        char *vaddr;
        int ret;
 
@@ -711,6 +788,7 @@
        kunmap_atomic(vaddr);
 
        return ret ? -EFAULT : 0;
+#endif
 }
 
 /* Only difference to the fast-path function is that this can handle bit17
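
This pwrite fast path is stubbed out for the same reason as the pread
one: it depends on kmap_atomic() and a non-faulting user copy.  The
sleepable slow path, along the lines sketched after the pread hunk,
carries the load instead.
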
@@ -761,8 +839,10 @@
        int hit_slowpath = 0;
        int needs_clflush_after = 0;
        int needs_clflush_before = 0;
+#ifndef __NetBSD__
        int i;
        struct scatterlist *sg;
+#endif
 
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
@@ -797,6 +877,49 @@
        offset = args->offset;
        obj->dirty = 1;
 
+#ifdef __NetBSD__
+       while (0 < remain) {
+               /* Get the next page.  */
+               shmem_page_offset = offset_in_page(offset);
+               KASSERT(shmem_page_offset < PAGE_SIZE);
+               page_length = MIN(remain, (PAGE_SIZE - shmem_page_offset));
+               struct page *const page = i915_gem_object_get_page(obj,
+                   (offset & ~(PAGE_SIZE-1)));
+
+               /* Decide whether to flush the cache or swizzle bit 17.  */
+               const bool partial_cacheline_write = needs_clflush_before &&
+                   ((shmem_page_offset | page_length)
+                       & (cpu_info_primary.ci_cflush_lsize - 1));
+               page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+                   (page_to_phys(page) & (1 << 17)) != 0;
+
+               /* Try the fast path.  */
+               ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
+                   user_data, page_do_bit17_swizzling,
+                   partial_cacheline_write, needs_clflush_after);
+               if (ret == 0)
+                       goto next_page;
+
+               /* Fast path failed.  Try the slow path.  */
+               hit_slowpath = 1;
+               mutex_unlock(&dev->struct_mutex);
+               ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
+                   user_data, page_do_bit17_swizzling,
+                   partial_cacheline_write, needs_clflush_after);
+               mutex_lock(&dev->struct_mutex);
+
+next_page:
+               page->p_vmp.flags &= ~PG_CLEAN;
+               /* XXX mark page accessed */
+               if (ret)
+                       goto out;
+
+               KASSERT(page_length <= remain);
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+#else
        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
                struct page *page;
                int partial_cacheline_write;
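
Two NetBSD-specific touches in the write loop are worth a note.  The
partial-cacheline test uses cpu_info_primary.ci_cflush_lsize, the CPU's
CLFLUSH line size in bytes, so a chunk whose start or length is not
line-aligned triggers a flush before the write.  And clearing PG_CLEAN
on the underlying vm_page is the UVM idiom for marking the page dirty
so the pagedaemon will eventually clean it.  For example, with a
64-byte flush line:

    /*
     * Illustration: shmem_page_offset = 100, page_length = 64:
     * (100 | 64) & 63 == 36 != 0, so the write is partial at the
     * cache-line level and needs_clflush_before applies.
     */
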
@@ -856,6 +979,7 @@
                user_data += page_length;
                offset += page_length;
        }
+#endif
 
 out:
        i915_gem_object_unpin_pages(obj);
@@ -899,10 +1023,12 @@
                       args->size))
                return -EFAULT;
 
+#ifndef __NetBSD__             /* XXX prefault */
        ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
                                           args->size);


