/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/dma-fence.h>

struct amd_gpu_scheduler;
struct amd_sched_rq;

/**
 * A scheduler entity is a wrapper around a job queue or a group of
 * other entities. Entities take turns emitting jobs from their job
 * queues to the corresponding hardware ring, based on the scheduling
 * policy.
 */
struct amd_sched_entity {
	struct list_head		list;
	struct amd_sched_rq		*rq;
	struct amd_gpu_scheduler	*sched;

	spinlock_t			queue_lock;
	struct kfifo			job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
};
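
/*
 * Minimal usage sketch (illustrative only; a "ring" with an embedded
 * amd_gpu_scheduler is an assumption of the example, not part of this
 * API): an entity is typically created once per context and bound to
 * one of the scheduler's run queues.
 *
 *	struct amd_sched_entity entity;
 *	struct amd_sched_rq *rq =
 *		&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 *	int r;
 *
 *	r = amd_sched_entity_init(&ring->sched, &entity, rq, 32);
 *	if (r)
 *		return r;
 */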

/**
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_sched_rq {
	spinlock_t		lock;
	struct list_head	entities;
	struct amd_sched_entity	*current_entity;
};

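/**
 * Fence pair tracking a job's life cycle: "scheduled" signals when the
 * job is picked up and pushed to the hardware ring, "finished" signals
 * when the hardware has completed it.
 */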
struct amd_sched_fence {
	struct dma_fence		scheduled;
	struct dma_fence		finished;
	struct dma_fence_cb		cb;
	struct dma_fence		*parent;
	struct amd_gpu_scheduler	*sched;
	spinlock_t			lock;
	void				*owner;
};

struct amd_sched_job {
	struct amd_gpu_scheduler	*sched;
	struct amd_sched_entity		*s_entity;
	struct amd_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct work_struct		finish_work;
	struct list_head		node;
	struct delayed_work		work_tdr;
	uint64_t			id;
	atomic_t			karma;
};

extern const struct dma_fence_ops amd_sched_fence_ops_scheduled;
extern const struct dma_fence_ops amd_sched_fence_ops_finished;
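
/*
 * Map a dma_fence back to its containing amd_sched_fence; returns NULL
 * for fences that do not belong to the scheduler.
 */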
static inline struct amd_sched_fence *to_amd_sched_fence(struct dma_fence *f)
{
	if (f->ops == &amd_sched_fence_ops_scheduled)
		return container_of(f, struct amd_sched_fence, scheduled);

	if (f->ops == &amd_sched_fence_ops_finished)
		return container_of(f, struct amd_sched_fence, finished);

	return NULL;
}

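/*
 * Bump the job's karma and report whether it has crossed the given
 * threshold; used during hang recovery to single out the guilty job.
 */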
static inline bool amd_sched_invalidate_job(struct amd_sched_job *s_job, int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * Backend operations called by the scheduler; these functions must be
 * implemented by the driver.
 */
struct amd_sched_backend_ops {
	struct dma_fence *(*dependency)(struct amd_sched_job *sched_job);
	struct dma_fence *(*run_job)(struct amd_sched_job *sched_job);
	void (*timedout_job)(struct amd_sched_job *sched_job);
	void (*free_job)(struct amd_sched_job *sched_job);
};
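
/*
 * Driver-side sketch (hypothetical "foo" driver; foo_job and the
 * helpers are illustrative, not part of this API). dependency() is
 * called until it returns NULL; run_job() returns the hardware fence:
 *
 *	static struct dma_fence *foo_dependency(struct amd_sched_job *sched_job)
 *	{
 *		struct foo_job *job = container_of(sched_job,
 *						   struct foo_job, base);
 *
 *		return foo_job_next_dependency(job);
 *	}
 *
 *	static const struct amd_sched_backend_ops foo_sched_ops = {
 *		.dependency = foo_dependency,
 *		.run_job = foo_run_job,
 *		.timedout_job = foo_timedout_job,
 *		.free_job = foo_free_job,
 *	};
 */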

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_NORMAL = AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_KERNEL,
	AMD_SCHED_PRIORITY_MAX
};

/**
 * One scheduler is implemented for each hardware ring.
 */
struct amd_gpu_scheduler {
	const struct amd_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct amd_sched_rq		sched_rq[AMD_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
};
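
/*
 * Bring-up sketch (illustrative; "ring", foo_sched_ops and the values
 * shown are assumptions of the example, not requirements of this API):
 *
 *	r = amd_sched_init(&ring->sched, &foo_sched_ops,
 *			   num_hw_submission,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 */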

int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);

int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
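
/*
 * Submission flow sketch (hypothetical driver job embedding
 * struct amd_sched_job as "base"): initialize the job, take a
 * reference to its finished fence for the caller, then push it
 * to the entity's queue.
 *
 *	r = amd_sched_job_init(&job->base, &ring->sched, &entity, owner);
 *	if (r)
 *		return r;
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	amd_sched_entity_push_job(&job->base);
 */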

int amd_sched_fence_slab_init(void);
void amd_sched_fence_slab_fini(void);

struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner);
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
bool amd_sched_dependency_optimized(struct dma_fence *fence,
				    struct amd_sched_entity *entity);
void amd_sched_job_kickout(struct amd_sched_job *s_job);
#endif