[cairo] [PATCH 34/71] drm: generalized bo unmap
Enrico Weigelt, metux IT consult
enrico.weigelt at gr13.net
Mon Apr 17 16:57:13 UTC 2017
We have separate per-driver implementations (intel, radeon) for unmapping
the bo's from process address space, which are essentially doing the same
thing. Consolidate them into one generic _cairo_drm_bo_unmap() helper.
Signed-off-by: Enrico Weigelt, metux IT consult <enrico.weigelt at gr13.net>
---
src/drm/cairo-drm-bo.c | 16 ++++++++++++++++
src/drm/cairo-drm-i915-surface.c | 6 ++----
src/drm/cairo-drm-i965-surface.c | 5 ++---
src/drm/cairo-drm-intel-private.h | 4 ----
src/drm/cairo-drm-intel.c | 30 +++++++++++-------------------
src/drm/cairo-drm-private.h | 6 ++++++
src/drm/cairo-drm-radeon-private.h | 5 -----
src/drm/cairo-drm-radeon-surface.c | 4 ++--
src/drm/cairo-drm-radeon.c | 23 +++++++----------------
9 files changed, 46 insertions(+), 53 deletions(-)
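Note: the consolidated helper added below is just a NULL-safe munmap()
wrapper over the new bo->mapped field. A minimal standalone sketch for
reference (using a trimmed-down stand-in for cairo_drm_bo_t, not the real
struct from cairo-drm-private.h):

    #include <stdint.h>
    #include <stddef.h>
    #include <sys/mman.h>   /* munmap() */

    /* Stand-in with only the fields the helper touches; the real
     * struct also carries name/handle/refcount. */
    typedef struct {
        uint32_t size;
        void *mapped;   /* field added by this patch */
    } cairo_drm_bo_t;

    /* NULL-safe, so callers can drop the "if (bo->virtual) unmap()"
     * dance and call it unconditionally. */
    static void
    _cairo_drm_bo_unmap (cairo_drm_bo_t *bo)
    {
        if (bo == NULL || bo->mapped == NULL)
            return;

        munmap (bo->mapped, bo->size);
        bo->mapped = NULL;
    }

Driver code that previously kept its own "virtual" pointer and called
intel_bo_unmap()/radeon_bo_unmap() now just calls
_cairo_drm_bo_unmap (&bo->base) and reads bo->base.mapped instead.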
diff --git a/src/drm/cairo-drm-bo.c b/src/drm/cairo-drm-bo.c
index c82f9331d..6b946ac62 100644
--- a/src/drm/cairo-drm-bo.c
+++ b/src/drm/cairo-drm-bo.c
@@ -34,6 +34,7 @@
#include <sys/ioctl.h>
#include <errno.h>
#include <libdrm/drm.h>
+#include <sys/mman.h> /* munmap() */
#define ERR_DEBUG(x) x
@@ -92,8 +93,23 @@ _cairo_drm_bo_close (const cairo_drm_device_t *dev,
struct drm_gem_close close;
int ret;
+ _cairo_drm_bo_unmap (bo);
+
close.handle = bo->handle;
do {
ret = ioctl (dev->fd, DRM_IOCTL_GEM_CLOSE, &close);
} while (ret == -1 && errno == EINTR);
}
+
+void
+_cairo_drm_bo_unmap (cairo_drm_bo_t *bo)
+{
+ if (unlikely(bo == NULL))
+ return;
+
+ if (unlikely(bo->mapped == NULL))
+ return;
+
+ munmap (bo->mapped, bo->size);
+ bo->mapped = NULL;
+}
diff --git a/src/drm/cairo-drm-i915-surface.c b/src/drm/cairo-drm-i915-surface.c
index e886fdc55..3f8004b35 100644
--- a/src/drm/cairo-drm-i915-surface.c
+++ b/src/drm/cairo-drm-i915-surface.c
@@ -256,10 +256,10 @@ i915_bo_exec (i915_device_t *device, intel_bo_t *bo, uint32_t offset)
bo->offset = device->batch.exec[i].offset;
bo->busy = TRUE;
- if (bo->virtual)
- intel_bo_unmap (bo);
bo->cpu = FALSE;
+ _cairo_drm_bo_unmap (&bo->base);
+
while (cnt--) {
intel_bo_t *bo = device->batch.target_bo[cnt];
@@ -270,8 +270,6 @@ i915_bo_exec (i915_device_t *device, intel_bo_t *bo, uint32_t offset)
bo->batch_write_domain = 0;
cairo_list_del (&bo->cache_list);
- if (bo->virtual)
- intel_bo_unmap (bo);
bo->cpu = FALSE;
intel_bo_destroy (&device->intel, bo);
diff --git a/src/drm/cairo-drm-i965-surface.c b/src/drm/cairo-drm-i965-surface.c
index b26e0c787..825555fe3 100644
--- a/src/drm/cairo-drm-i965-surface.c
+++ b/src/drm/cairo-drm-i965-surface.c
@@ -382,11 +382,10 @@ i965_exec (i965_device_t *device, uint32_t offset)
bo->exec = NULL;
bo->batch_read_domains = 0;
bo->batch_write_domain = 0;
-
- if (bo->virtual)
- intel_bo_unmap (bo);
bo->cpu = FALSE;
+ _cairo_drm_bo_unmap (&bo->base);
+
if (bo->purgeable)
ret = intel_bo_madvise (&device->intel, bo, I915_MADV_DONTNEED);
/* ignore immediate notification of purging */
diff --git a/src/drm/cairo-drm-intel-private.h b/src/drm/cairo-drm-intel-private.h
index a9a5ff9ce..43550eeca 100644
--- a/src/drm/cairo-drm-intel-private.h
+++ b/src/drm/cairo-drm-intel-private.h
@@ -75,7 +75,6 @@ typedef struct _intel_bo {
uint32_t cpu :1;
struct drm_i915_gem_exec_object2 *exec;
- void *virtual;
} intel_bo_t;
#define INTEL_BATCH_SIZE (64*1024)
@@ -350,9 +349,6 @@ intel_bo_read (const intel_device_t *dev,
cairo_private void *
intel_bo_map (const intel_device_t *dev, intel_bo_t *bo);
-cairo_private void
-intel_bo_unmap (intel_bo_t *bo);
-
cairo_private cairo_status_t
intel_bo_init (const intel_device_t *dev,
intel_bo_t *bo,
diff --git a/src/drm/cairo-drm-intel.c b/src/drm/cairo-drm-intel.c
index 6c4187771..9e8219f8a 100644
--- a/src/drm/cairo-drm-intel.c
+++ b/src/drm/cairo-drm-intel.c
@@ -163,8 +163,8 @@ intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
intel_bo_set_tiling (device, bo);
- if (bo->virtual != NULL)
- return bo->virtual;
+ if (bo->base.mapped != NULL)
+ return bo->base.mapped;
if (bo->cpu && bo->tiling == I915_TILING_NONE) {
struct drm_i915_gem_mmap mmap_arg;
@@ -182,7 +182,7 @@ intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
return NULL;
}
- bo->virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
+ bo->base.mapped = (void *) (uintptr_t) mmap_arg.addr_ptr;
domain = I915_GEM_DOMAIN_CPU;
} else {
struct drm_i915_gem_mmap_gtt mmap_arg;
@@ -208,11 +208,11 @@ intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
return NULL;
}
- bo->virtual = ptr;
+ bo->base.mapped = ptr;
domain = I915_GEM_DOMAIN_GTT;
}
- VG (VALGRIND_MAKE_MEM_DEFINED (bo->virtual, bo->base.size));
+ VG (VALGRIND_MAKE_MEM_DEFINED (bo->base.mapped, bo->base.size));
set_domain.handle = bo->base.handle;
set_domain.read_domains = domain;
@@ -225,20 +225,13 @@ intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
} while (ret == -1 && errno == EINTR);
if (ret != 0) {
- intel_bo_unmap (bo);
+ _cairo_drm_bo_unmap (&bo->base);
_cairo_error_throw (CAIRO_STATUS_DEVICE_ERROR);
return NULL;
}
bo->busy = FALSE;
- return bo->virtual;
-}
-
-void
-intel_bo_unmap (intel_bo_t *bo)
-{
- munmap (bo->virtual, bo->base.size);
- bo->virtual = NULL;
+ return bo->base.mapped;
}
cairo_bool_t
@@ -398,7 +391,7 @@ intel_bo_create (intel_device_t *device,
bo->base.name = 0;
bo->offset = 0;
- bo->virtual = NULL;
+ bo->base.mapped = NULL;
bo->cpu = TRUE;
bo->_tiling = I915_TILING_NONE;
@@ -457,7 +450,7 @@ intel_bo_create_for_name (intel_device_t *device, uint32_t name)
bo->full_size = bo->base.size;
bo->offset = 0;
- bo->virtual = NULL;
+ bo->base.mapped = NULL;
bo->purgeable = 0;
bo->busy = TRUE;
bo->cpu = FALSE;
@@ -496,8 +489,7 @@ intel_bo_release (cairo_drm_device_t *_dev, cairo_drm_bo_t *_bo)
intel_device_t *device = _cairo_drm_device_cast_intel(_dev);
intel_bo_t *bo = _cairo_drm_bo_cast_intel(_bo);
- if (bo->virtual != NULL)
- intel_bo_unmap (bo);
+ _cairo_drm_bo_unmap (_bo);
assert (bo->exec == NULL);
assert (cairo_list_is_empty (&bo->cache_list));
@@ -830,7 +822,7 @@ intel_glyph_cache_add_glyph (intel_device_t *device,
height = glyph_surface->height;
src = glyph_surface->data;
- dst = cache->buffer.bo->virtual;
+ dst = cache->buffer.bo->base.mapped;
if (dst == NULL) {
dst = intel_bo_map (device, cache->buffer.bo);
if (unlikely (dst == NULL))
diff --git a/src/drm/cairo-drm-private.h b/src/drm/cairo-drm-private.h
index 5c4b74f8b..299ed3630 100644
--- a/src/drm/cairo-drm-private.h
+++ b/src/drm/cairo-drm-private.h
@@ -112,6 +112,7 @@ struct _cairo_drm_bo {
uint32_t name;
uint32_t handle;
uint32_t size;
+ void* mapped;
};
struct _cairo_drm_device {
@@ -183,6 +184,9 @@ _cairo_device_cast_drm_const(const cairo_device_t *device)
return cairo_container_of (device, const cairo_drm_device_t, base);
}
+cairo_private void
+_cairo_drm_bo_unmap (cairo_drm_bo_t *bo);
+
static inline cairo_drm_bo_t *
cairo_drm_bo_reference (cairo_drm_bo_t *bo)
{
@@ -200,6 +204,8 @@ static cairo_always_inline void
cairo_drm_bo_destroy (cairo_device_t *abstract_device,
cairo_drm_bo_t *bo)
{
+ _cairo_drm_bo_unmap (bo);
+
if (_cairo_reference_count_dec_and_test (&bo->ref_count)) {
cairo_drm_device_t *device = (cairo_drm_device_t *) abstract_device;
device->bo.release (device, bo);
diff --git a/src/drm/cairo-drm-radeon-private.h b/src/drm/cairo-drm-radeon-private.h
index 107f55ba8..3567562de 100644
--- a/src/drm/cairo-drm-radeon-private.h
+++ b/src/drm/cairo-drm-radeon-private.h
@@ -38,8 +38,6 @@
typedef struct _radeon_bo {
cairo_drm_bo_t base;
- void *virtual;
-
cairo_bool_t in_batch;
uint32_t read_domains;
uint32_t write_domain;
@@ -180,9 +178,6 @@ radeon_bo_wait (const radeon_device_t *dev, radeon_bo_t *bo);
cairo_private void *
radeon_bo_map (const radeon_device_t *dev, radeon_bo_t *bo);
-cairo_private void
-radeon_bo_unmap (radeon_bo_t *bo);
-
cairo_private cairo_drm_bo_t *
radeon_bo_create (radeon_device_t *dev,
uint32_t size,
diff --git a/src/drm/cairo-drm-radeon-surface.c b/src/drm/cairo-drm-radeon-surface.c
index 3adb0e0f2..7de342f08 100644
--- a/src/drm/cairo-drm-radeon-surface.c
+++ b/src/drm/cairo-drm-radeon-surface.c
@@ -138,7 +138,7 @@ radeon_surface_map_to_image (radeon_surface_t *surface)
surface->base.height,
surface->base.stride);
if (unlikely (image->status)) {
- radeon_bo_unmap (to_radeon_bo (surface->base.bo));
+ _cairo_drm_bo_unmap (surface->base.bo);
return image;
}
@@ -168,7 +168,7 @@ radeon_surface_flush (void *abstract_surface,
cairo_surface_destroy (surface->base.fallback);
surface->base.fallback = NULL;
- radeon_bo_unmap (to_radeon_bo (surface->base.bo));
+ _cairo_drm_bo_unmap (surface->base.bo);
return status;
}
diff --git a/src/drm/cairo-drm-radeon.c b/src/drm/cairo-drm-radeon.c
index 94bc2951f..bf92270ed 100644
--- a/src/drm/cairo-drm-radeon.c
+++ b/src/drm/cairo-drm-radeon.c
@@ -87,7 +87,7 @@ radeon_bo_write (const radeon_device_t *device,
ptr = radeon_bo_map (device, bo);
if (ptr != NULL) {
memcpy (ptr + offset, data, size);
- radeon_bo_unmap (bo);
+ _cairo_drm_bo_unmap (&bo->base);
}
}
}
@@ -118,7 +118,7 @@ radeon_bo_read (const radeon_device_t *device,
ptr = radeon_bo_map (device, bo);
if (ptr != NULL) {
memcpy (data, ptr + offset, size);
- radeon_bo_unmap (bo);
+ _cairo_drm_bo_unmap (&bo->base);
}
}
@@ -144,7 +144,7 @@ radeon_bo_map (const radeon_device_t *device, radeon_bo_t *bo)
void *ptr;
int ret;
- assert (bo->virtual == NULL);
+ assert (bo->base.mapped == NULL);
memset (&mmap_arg, 0, sizeof (mmap_arg));
mmap_arg.handle = bo->base.handle;
@@ -170,19 +170,10 @@ radeon_bo_map (const radeon_device_t *device, radeon_bo_t *bo)
return NULL;
}
- bo->virtual = ptr;
+ bo->base.mapped = ptr;
/* XXX set_domain? */
- return bo->virtual;
-}
-
-void
-radeon_bo_unmap (radeon_bo_t *bo)
-{
- assert (bo->virtual != NULL);
-
- munmap (bo->virtual, bo->base.size);
- bo->virtual = NULL;
+ return bo->base.mapped;
}
cairo_drm_bo_t *
@@ -215,7 +206,7 @@ radeon_bo_create (radeon_device_t *device,
bo->base.handle = create.handle;
bo->base.size = size;
- bo->virtual = NULL;
+ bo->base.mapped = NULL;
bo->in_batch = FALSE;
bo->read_domains = 0;
@@ -242,7 +233,7 @@ radeon_bo_create_for_name (radeon_device_t *device,
return NULL;
}
- bo->virtual = NULL;
+ bo->base.mapped = NULL;
bo->in_batch = FALSE;
bo->read_domains = 0;
--
2.11.0.rc0.7.gbe5a750