From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] drm/qxl: update qxl_object.c for the reworked GEM/TTM APIs
---
kernel/drivers/gpu/drm/qxl/qxl_object.c | 134 +++++++++++++++++++++++++-------------------
 1 file changed, 76 insertions(+), 58 deletions(-)
diff --git a/kernel/drivers/gpu/drm/qxl/qxl_object.c b/kernel/drivers/gpu/drm/qxl/qxl_object.c
index 6a30196..5ee5171 100644
--- a/kernel/drivers/gpu/drm/qxl/qxl_object.c
+++ b/kernel/drivers/gpu/drm/qxl/qxl_object.c
@@ -33,13 +33,14 @@
struct qxl_device *qdev;
bo = to_qxl_bo(tbo);
- qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ qdev = to_qxl(bo->tbo.base.dev);
qxl_surface_evict(qdev, bo, false);
+ WARN_ON_ONCE(bo->map_count > 0);
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
- drm_gem_object_release(&bo->gem_base);
+ drm_gem_object_release(&bo->tbo.base);
kfree(bo);
}
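For reference, the to_qxl() helper used above is presumably the usual
container_of() upcast from the embedded drm_device; a minimal sketch, assuming
struct qxl_device embeds its drm_device as 'ddev' (as qxl_drv.h does in
kernels of this vintage):

#include <linux/kernel.h>	/* container_of() */

static inline struct qxl_device *to_qxl(struct drm_device *dev)
{
	/* upcast from the embedded drm_device back to the driver struct */
	return container_of(dev, struct qxl_device, ddev);
}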
@@ -53,19 +54,34 @@
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
- unsigned i;
+ u32 pflag = 0;
+ unsigned int i;
+
+ if (pinned)
+ pflag |= TTM_PL_FLAG_NO_EVICT;
+ if (qbo->tbo.base.size <= PAGE_SIZE)
+ pflag |= TTM_PL_FLAG_TOPDOWN;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
- if (domain == QXL_GEM_DOMAIN_VRAM)
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
- if (domain == QXL_GEM_DOMAIN_SURFACE)
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag;
- if (domain == QXL_GEM_DOMAIN_CPU)
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
- if (!c)
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (domain == QXL_GEM_DOMAIN_VRAM) {
+ qbo->placements[c].mem_type = TTM_PL_VRAM;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ }
+ if (domain == QXL_GEM_DOMAIN_SURFACE) {
+ qbo->placements[c].mem_type = TTM_PL_PRIV;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c].mem_type = TTM_PL_VRAM;
+ qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ }
+ if (domain == QXL_GEM_DOMAIN_CPU) {
+ qbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
+ }
+ if (!c) {
+ qbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+ }
qbo->placement.num_placement = c;
qbo->placement.num_busy_placement = c;
for (i = 0; i < c; ++i) {
@@ -74,9 +90,21 @@
}
}
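The rework above follows the TTM change that moved the memory type out of the
flags word into a dedicated ttm_place.mem_type field. For
QXL_GEM_DOMAIN_SURFACE the function now emits two candidate placements,
roughly equivalent to this illustrative initializer (field values taken from
the hunk above):

struct ttm_place surface_places[] = {
	{ .mem_type = TTM_PL_PRIV, .flags = TTM_PL_FLAG_CACHED },	/* preferred */
	{ .mem_type = TTM_PL_VRAM, .flags = TTM_PL_FLAG_CACHED },	/* fallback */
};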
+static const struct drm_gem_object_funcs qxl_object_funcs = {
+ .free = qxl_gem_object_free,
+ .open = qxl_gem_object_open,
+ .close = qxl_gem_object_close,
+ .pin = qxl_gem_prime_pin,
+ .unpin = qxl_gem_prime_unpin,
+ .get_sg_table = qxl_gem_prime_get_sg_table,
+ .vmap = qxl_gem_prime_vmap,
+ .vunmap = qxl_gem_prime_vunmap,
+ .mmap = drm_gem_ttm_mmap,
+ .print_info = drm_gem_ttm_print_info,
+};
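With a per-object drm_gem_object_funcs table, the DRM core dispatches through
bo->tbo.base.funcs instead of driver-global callbacks. Conceptually the free
path looks like this simplified sketch (not the exact core code):

#include <drm/drm_gem.h>

static void gem_free_dispatch_sketch(struct drm_gem_object *obj)
{
	/* the core consults the per-object table set in qxl_bo_create() */
	if (obj->funcs && obj->funcs->free)
		obj->funcs->free(obj);	/* ends up in qxl_gem_object_free() */
}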
-int qxl_bo_create(struct qxl_device *qdev,
- unsigned long size, bool kernel, bool pinned, u32 domain,
+int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
+ bool kernel, bool pinned, u32 domain, u32 priority,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
@@ -93,11 +121,12 @@
if (bo == NULL)
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
- r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
+ r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
if (unlikely(r)) {
kfree(bo);
return r;
}
+ bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
@@ -108,6 +137,7 @@
qxl_ttm_placement_from_domain(bo, domain, pinned);
+ bo->tbo.priority = priority;
r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, !kernel, size,
NULL, NULL, &qxl_ttm_bo_destroy);
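A hypothetical caller after the signature change; the new argument is stored
in bo->tbo.priority, which selects the TTM LRU list the BO lives on (argument
names and values are illustrative only):

struct qxl_bo *bo;
int r;

r = qxl_bo_create(qdev, PAGE_SIZE, /* kernel */ true, /* pinned */ true,
		  QXL_GEM_DOMAIN_VRAM, /* priority */ 0, NULL, &bo);
if (r)
	return r;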
@@ -130,6 +160,7 @@
if (bo->kptr) {
if (ptr)
*ptr = bo->kptr;
+ bo->map_count++;
return 0;
}
r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
@@ -138,13 +169,14 @@
bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr)
*ptr = bo->kptr;
+ bo->map_count = 1;
return 0;
}
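map_count (together with the WARN_ON_ONCE added in the destroy path) makes
qxl_bo_kmap()/qxl_bo_kunmap() a balanced, nestable pair: the kernel mapping is
only torn down when the last user unmaps. A hedged usage sketch ('data' and
'len' are hypothetical):

void *ptr;

if (qxl_bo_kmap(bo, &ptr) == 0) {	/* first call maps, map_count = 1 */
	memcpy(ptr, data, len);		/* use the CPU-side mapping */
	qxl_bo_kunmap(bo);		/* map_count hits 0, mapping is freed */
}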
-void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
- struct qxl_bo *bo, int page_offset)
+void *qxl_bo_kmap_local_page(struct qxl_device *qdev,
+ struct qxl_bo *bo, int page_offset)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
+ unsigned long offset;
void *rptr;
int ret;
struct io_mapping *map;
@@ -156,11 +188,8 @@
else
goto fallback;
- (void) ttm_mem_io_lock(man, false);
- ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
-
- return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
+ offset = bo->tbo.mem.start << PAGE_SHIFT;
+ return io_mapping_map_local_wc(map, offset + page_offset);
fallback:
if (bo->kptr) {
rptr = bo->kptr + (page_offset * PAGE_SIZE);
@@ -179,29 +208,22 @@
{
if (bo->kptr == NULL)
return;
+ bo->map_count--;
+ if (bo->map_count > 0)
+ return;
bo->kptr = NULL;
ttm_bo_kunmap(&bo->kmap);
}
-void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
- struct qxl_bo *bo, void *pmap)
+void qxl_bo_kunmap_local_page(struct qxl_device *qdev,
+ struct qxl_bo *bo, void *pmap)
{
- struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
- struct io_mapping *map;
-
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
- map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
- map = qdev->surface_mapping;
- else
+ if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+ (bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
- io_mapping_unmap_atomic(pmap);
-
- (void) ttm_mem_io_lock(man, false);
- ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
- ttm_mem_io_unlock(man);
- return ;
+ io_mapping_unmap_local(pmap);
+ return;
fallback:
qxl_bo_kunmap(bo);
}
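The *_local variants replace the old atomic ones: io_mapping_map_local_wc()
leaves preemption enabled (only CPU migration is disabled), and nested
mappings must be released in reverse order in the same thread. A sketch of the
expected pairing, assuming a reserved, VRAM-resident BO:

void *ptr;

ptr = qxl_bo_kmap_local_page(qdev, bo, 0 /* page_offset */);
/* ... touch the page; unlike the atomic API, preemption stays enabled ... */
qxl_bo_kunmap_local_page(qdev, bo, ptr);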
@@ -211,34 +233,30 @@
if ((*bo) == NULL)
return;
- drm_gem_object_put_unlocked(&(*bo)->gem_base);
+ drm_gem_object_put(&(*bo)->tbo.base);
*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
- drm_gem_object_get(&bo->gem_base);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
-static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+static int __qxl_bo_pin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
int r;
if (bo->pin_count) {
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain, true);
+ qxl_ttm_placement_from_domain(bo, bo->type, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = qxl_bo_gpu_offset(bo);
}
if (unlikely(r != 0))
dev_err(ddev->dev, "%p pin failed\n", bo);
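Since __qxl_bo_pin() no longer reports the GPU address, a caller that needs it
queries the offset separately once the BO is pinned; a minimal sketch (error
handling trimmed, 'offset' is a hypothetical u64):

u64 offset;
int r;

r = qxl_bo_pin(bo);		/* reserves, validates as pinned, unreserves */
if (r)
	return r;
offset = qxl_bo_gpu_offset(bo);	/* stable while the pin is held */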
@@ -248,7 +266,7 @@
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
int r, i;
if (!bo->pin_count) {
@@ -266,21 +284,20 @@
return r;
}
-
/*
* Reserve the BO before pinning the object. If the BO was reserved
* beforehand, use the internal version directly __qxl_bo_pin.
*
*/
-int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+int qxl_bo_pin(struct qxl_bo *bo)
{
int r;
- r = qxl_bo_reserve(bo, false);
+ r = qxl_bo_reserve(bo);
if (r)
return r;
- r = __qxl_bo_pin(bo, bo->type, NULL);
+ r = __qxl_bo_pin(bo);
qxl_bo_unreserve(bo);
return r;
}
@@ -294,7 +311,7 @@
{
int r;
- r = qxl_bo_reserve(bo, false);
+ r = qxl_bo_reserve(bo);
if (r)
return r;
@@ -312,13 +329,13 @@
dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
- &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
- *((unsigned long *)&bo->gem_base.refcount));
+ &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+ *((unsigned long *)&bo->tbo.base.refcount));
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put(&bo->tbo.base);
}
}
@@ -335,13 +352,14 @@
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
int ret;
+
if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
/* allocate a surface id for this surface now */
ret = qxl_surface_id_alloc(qdev, bo);
if (ret)
return ret;
- ret = qxl_hw_surface_alloc(qdev, bo, NULL);
+ ret = qxl_hw_surface_alloc(qdev, bo);
if (ret)
return ret;
}
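qxl_bo_check_id() keeps surface setup lazy: the surface id and the hardware
surface are allocated only when a surface BO is first used. Callers simply
gate on it; a sketch:

ret = qxl_bo_check_id(qdev, bo);	/* no-op once the id exists */
if (ret)
	return ret;
/* bo->surface_id is now valid for surface commands */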
--
Gitblit v1.6.2