drm/amdkfd: Encapsulate DQM functions in ops structure
author Oded Gabbay <oded.gabbay@amd.com>
Mon, 12 Jan 2015 12:26:10 +0000 (14:26 +0200)
committer Oded Gabbay <oded.gabbay@amd.com>
Mon, 12 Jan 2015 12:26:10 +0000 (14:26 +0200)
This patch reorganizes the device_queue_manager structure. It moves all the
function pointers out of that structure and into a new structure, called
device_queue_manager_ops, and then embeds an instance of that structure inside
device_queue_manager.

This reorganization prepares the DQM module to support more than one AMD APU
(currently only Kaveri is supported).
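
For illustration, here is a minimal stand-alone C sketch of the pattern this
patch introduces: the function pointers live in a device_queue_manager_ops
table, the manager embeds an instance of it, and callers dispatch through
dqm->ops.<fn> instead of dqm-><fn>. The types are deliberately simplified
(the sched_policy field, the stdio output and start_nocpsch body are
placeholders, not the real kfd definitions):

    #include <stdio.h>

    struct device_queue_manager;    /* forward declaration for the ops table */

    /* All per-scheduling-mode callbacks are grouped in one ops structure. */
    struct device_queue_manager_ops {
        int (*initialize)(struct device_queue_manager *dqm);
        int (*start)(struct device_queue_manager *dqm);
        int (*stop)(struct device_queue_manager *dqm);
    };

    /* The manager embeds the ops table, so a different ASIC (or scheduling
     * policy) can later plug in its own set of callbacks. */
    struct device_queue_manager {
        struct device_queue_manager_ops ops;
        int sched_policy;           /* simplified placeholder for dqm state */
    };

    static int start_nocpsch(struct device_queue_manager *dqm)
    {
        printf("starting DQM without CP scheduling\n");
        return 0;
    }

    int main(void)
    {
        struct device_queue_manager dqm = { 0 };

        /* device_queue_manager_init() fills the ops table per policy */
        dqm.ops.start = start_nocpsch;

        /* callers now go through the embedded ops structure */
        return dqm.ops.start(&dqm);
    }
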

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c

index b008fd67ace9ea4db06a74a384f5d483cf9cfa8b..38b6150a19eeb5cad10ed9dcb3033aeb8cb61355 100644
@@ -439,7 +439,7 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
                (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
                   ? cache_policy_coherent : cache_policy_noncoherent;
 
-       if (!dev->dqm->set_cache_memory_policy(dev->dqm,
+       if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
                                &pdd->qpd,
                                default_policy,
                                alternate_policy,
index a23ed2440080ea64c79a873cf83de200918f83dc..a770ec6f22cad24a878d96426ebb2f9395cefdea 100644
@@ -253,7 +253,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                goto device_queue_manager_error;
        }
 
-       if (kfd->dqm->start(kfd->dqm) != 0) {
+       if (kfd->dqm->ops.start(kfd->dqm) != 0) {
                dev_err(kfd_device,
                        "Error starting queuen manager for device (%x:%x)\n",
                        kfd->pdev->vendor, kfd->pdev->device);
@@ -307,7 +307,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
        BUG_ON(kfd == NULL);
 
        if (kfd->init_complete) {
-               kfd->dqm->stop(kfd->dqm);
+               kfd->dqm->ops.stop(kfd->dqm);
                amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
                amd_iommu_free_device(kfd->pdev);
        }
@@ -328,7 +328,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
                        return -ENXIO;
                amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
                                                iommu_pasid_shutdown_callback);
-               kfd->dqm->start(kfd->dqm);
+               kfd->dqm->ops.start(kfd->dqm);
        }
 
        return 0;
index c83f011534401e7043984098e3c4a86816dfe04c..12c84488551e80f7f04028e06e0bbc7d0a502321 100644
@@ -271,7 +271,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 
        BUG_ON(!dqm || !q || !qpd);
 
-       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+       mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
        if (mqd == NULL)
                return -ENOMEM;
 
@@ -305,14 +305,14 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
        mutex_lock(&dqm->lock);
 
        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
-               mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+               mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
                if (mqd == NULL) {
                        retval = -ENOMEM;
                        goto out;
                }
                deallocate_hqd(dqm, q);
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-               mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+               mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
                if (mqd == NULL) {
                        retval = -ENOMEM;
                        goto out;
@@ -348,7 +348,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
        BUG_ON(!dqm || !q || !q->mqd);
 
        mutex_lock(&dqm->lock);
-       mqd = dqm->get_mqd_manager(dqm, q->properties.type);
+       mqd = dqm->ops.get_mqd_manager(dqm, q->properties.type);
        if (mqd == NULL) {
                mutex_unlock(&dqm->lock);
                return -ENOMEM;
@@ -515,7 +515,7 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
        memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
 
-       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+       mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
        if (mqd == NULL) {
                kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
                return -ENOMEM;
@@ -646,7 +646,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
        struct mqd_manager *mqd;
        int retval;
 
-       mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+       mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
        if (!mqd)
                return -ENOMEM;
 
@@ -849,7 +849,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                select_sdma_engine_id(q);
 
-       mqd = dqm->get_mqd_manager(dqm,
+       mqd = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
 
        if (mqd == NULL) {
@@ -994,7 +994,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
        /* remove queue from list to prevent rescheduling after preemption */
        mutex_lock(&dqm->lock);
-       mqd = dqm->get_mqd_manager(dqm,
+       mqd = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
        if (!mqd) {
                retval = -ENOMEM;
@@ -1116,40 +1116,40 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
        case KFD_SCHED_POLICY_HWS:
        case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
                /* initialize dqm for cp scheduling */
-               dqm->create_queue = create_queue_cpsch;
-               dqm->initialize = initialize_cpsch;
-               dqm->start = start_cpsch;
-               dqm->stop = stop_cpsch;
-               dqm->destroy_queue = destroy_queue_cpsch;
-               dqm->update_queue = update_queue;
-               dqm->get_mqd_manager = get_mqd_manager_nocpsch;
-               dqm->register_process = register_process_nocpsch;
-               dqm->unregister_process = unregister_process_nocpsch;
-               dqm->uninitialize = uninitialize_nocpsch;
-               dqm->create_kernel_queue = create_kernel_queue_cpsch;
-               dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
-               dqm->set_cache_memory_policy = set_cache_memory_policy;
+               dqm->ops.create_queue = create_queue_cpsch;
+               dqm->ops.initialize = initialize_cpsch;
+               dqm->ops.start = start_cpsch;
+               dqm->ops.stop = stop_cpsch;
+               dqm->ops.destroy_queue = destroy_queue_cpsch;
+               dqm->ops.update_queue = update_queue;
+               dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+               dqm->ops.register_process = register_process_nocpsch;
+               dqm->ops.unregister_process = unregister_process_nocpsch;
+               dqm->ops.uninitialize = uninitialize_nocpsch;
+               dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
+               dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
+               dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
                break;
        case KFD_SCHED_POLICY_NO_HWS:
                /* initialize dqm for no cp scheduling */
-               dqm->start = start_nocpsch;
-               dqm->stop = stop_nocpsch;
-               dqm->create_queue = create_queue_nocpsch;
-               dqm->destroy_queue = destroy_queue_nocpsch;
-               dqm->update_queue = update_queue;
-               dqm->get_mqd_manager = get_mqd_manager_nocpsch;
-               dqm->register_process = register_process_nocpsch;
-               dqm->unregister_process = unregister_process_nocpsch;
-               dqm->initialize = initialize_nocpsch;
-               dqm->uninitialize = uninitialize_nocpsch;
-               dqm->set_cache_memory_policy = set_cache_memory_policy;
+               dqm->ops.start = start_nocpsch;
+               dqm->ops.stop = stop_nocpsch;
+               dqm->ops.create_queue = create_queue_nocpsch;
+               dqm->ops.destroy_queue = destroy_queue_nocpsch;
+               dqm->ops.update_queue = update_queue;
+               dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+               dqm->ops.register_process = register_process_nocpsch;
+               dqm->ops.unregister_process = unregister_process_nocpsch;
+               dqm->ops.initialize = initialize_nocpsch;
+               dqm->ops.uninitialize = uninitialize_nocpsch;
+               dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
                break;
        default:
                BUG();
                break;
        }
 
-       if (dqm->initialize(dqm) != 0) {
+       if (dqm->ops.initialize(dqm) != 0) {
                kfree(dqm);
                return NULL;
        }
@@ -1161,7 +1161,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
 {
        BUG_ON(!dqm);
 
-       dqm->uninitialize(dqm);
+       dqm->ops.uninitialize(dqm);
        kfree(dqm);
 }
 
index 554c06ee8892efedff4471bc19a10290b42755e0..72d2ca056e1916cd3bd5ba323214238fc52e13be 100644
@@ -46,7 +46,7 @@ struct device_process_node {
 };
 
 /**
- * struct device_queue_manager
+ * struct device_queue_manager_ops
  *
  * @create_queue: Queue creation routine.
  *
@@ -81,15 +81,9 @@ struct device_process_node {
  * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
  * memory apertures.
  *
- * This struct is a base class for the kfd queues scheduler in the
- * device level. The device base class should expose the basic operations
- * for queue creation and queue destruction. This base class hides the
- * scheduling mode of the driver and the specific implementation of the
- * concrete device. This class is the only class in the queues scheduler
- * that configures the H/W.
  */
 
-struct device_queue_manager {
+struct device_queue_manager_ops {
        int     (*create_queue)(struct device_queue_manager *dqm,
                                struct queue *q,
                                struct qcm_process_device *qpd,
@@ -124,7 +118,22 @@ struct device_queue_manager {
                                           enum cache_policy alternate_policy,
                                           void __user *alternate_aperture_base,
                                           uint64_t alternate_aperture_size);
+};
+
+/**
+ * struct device_queue_manager
+ *
+ * This struct is a base class for the kfd queues scheduler in the
+ * device level. The device base class should expose the basic operations
+ * for queue creation and queue destruction. This base class hides the
+ * scheduling mode of the driver and the specific implementation of the
+ * concrete device. This class is the only class in the queues scheduler
+ * that configures the H/W.
+ *
+ */
 
+struct device_queue_manager {
+       struct device_queue_manager_ops ops;
 
        struct mqd_manager      *mqds[KFD_MQD_TYPE_MAX];
        struct packet_manager   packets;
index 773c213f2f9ae657bb08abbb0f66638d3d4a8d60..add0fb4cc6589e3385d7a3f05fdc6466865ab602 100644
@@ -56,7 +56,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
        switch (type) {
        case KFD_QUEUE_TYPE_DIQ:
        case KFD_QUEUE_TYPE_HIQ:
-               kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
+               kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
                                                KFD_MQD_TYPE_HIQ);
                break;
        default:
index 948b1ca8e7a24228773a592cf8b50dc298d06599..513eeb6e402a8513f5d1b0ff1875b77d2c50e603 100644
@@ -178,7 +178,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
        if (list_empty(&pqm->queues)) {
                pdd->qpd.pqm = pqm;
-               dev->dqm->register_process(dev->dqm, &pdd->qpd);
+               dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
        }
 
        pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
@@ -204,7 +204,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                        goto err_create_queue;
                pqn->q = q;
                pqn->kq = NULL;
-               retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+               retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
                                                &q->properties.vmid);
                print_queue(q);
                break;
@@ -217,7 +217,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                kq->queue->properties.queue_id = *qid;
                pqn->kq = kq;
                pqn->q = NULL;
-               retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
+               retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
+                                                       kq, &pdd->qpd);
                break;
        default:
                BUG();
@@ -285,13 +286,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
        if (pqn->kq) {
                /* destroy kernel queue (DIQ) */
                dqm = pqn->kq->dev->dqm;
-               dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+               dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
                kernel_queue_uninit(pqn->kq);
        }
 
        if (pqn->q) {
                dqm = pqn->q->device->dqm;
-               retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
+               retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
                if (retval != 0)
                        return retval;
 
@@ -303,7 +304,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
        clear_bit(qid, pqm->queue_slot_bitmap);
 
        if (list_empty(&pqm->queues))
-               dqm->unregister_process(dqm, &pdd->qpd);
+               dqm->ops.unregister_process(dqm, &pdd->qpd);
 
        return retval;
 }
@@ -324,7 +325,8 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
        pqn->q->properties.queue_percent = p->queue_percent;
        pqn->q->properties.priority = p->priority;
 
-       retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
+       retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+                                                       pqn->q);
        if (retval != 0)
                return retval;
 