arch/powerpc/platforms/cell/spufs/hw_ops.c
/* hw_ops.c - query/set operations on active SPU context.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

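/*
 * Read one word from the SPU-to-PU (outbound) mailbox via the problem
 * state area.  The low byte of mb_stat_R reports the entries pending in
 * that mailbox, so pu_mb_R is only touched when the count is non-zero.
 * Returns the number of bytes read (4) or 0 if the mailbox was empty.
 */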
static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        u32 mbox_stat;
        int ret = 0;

        spin_lock_irq(&spu->register_lock);
        mbox_stat = in_be32(&prob->mb_stat_R);
        if (mbox_stat & 0x0000ff) {
                *data = in_be32(&prob->pu_mb_R);
                ret = 4;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->mb_stat_R);
}

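/*
 * Poll helper for the mailbox files.  mb_stat_R is checked for the
 * requested direction; if nothing is ready yet, the matching class 2
 * interrupt source is unmasked (0x1 for the PU interrupt mailbox,
 * 0x10 for free space in the SPU mailbox) so the waiter is woken once
 * the condition becomes true.
 */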
static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
                                          unsigned int events)
{
        struct spu *spu = ctx->spu;
        int ret = 0;
        u32 stat;

        spin_lock_irq(&spu->register_lock);
        stat = in_be32(&spu->problem->mb_stat_R);

        /* if the requested event is there, return the poll
           mask, otherwise enable the interrupt to get notified,
           but first mark any pending interrupts as done so
           we don't get woken up unnecessarily */

        if (events & (POLLIN | POLLRDNORM)) {
                if (stat & 0xff0000)
                        ret |= POLLIN | POLLRDNORM;
                else {
                        spu_int_stat_clear(spu, 2, 0x1);
                        spu_int_mask_or(spu, 2, 0x1);
                }
        }
        if (events & (POLLOUT | POLLWRNORM)) {
                if (stat & 0x00ff00)
                        ret |= POLLOUT | POLLWRNORM;
                else {
                        spu_int_stat_clear(spu, 2, 0x10);
                        spu_int_mask_or(spu, 2, 0x10);
                }
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

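/*
 * Read one word from the PU interrupt mailbox in the priv2 area.
 * Bits 16-23 of mb_stat_R show whether data is queued; if not, the
 * class 2 mailbox interrupt is unmasked and 0 is returned so the
 * caller can sleep until data arrives.
 */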
static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0xff0000) {
                /* read the first available word */
                *data = in_be64(&priv2->puint_mb_R);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt */
                spu_int_mask_or(spu, 2, 0x1);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

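/*
 * Write one word to the PU-to-SPU (inbound) mailbox.  Bits 8-15 of
 * mb_stat_R report free space; if the queue is full, the class 2
 * interrupt for mailbox space (0x10) is unmasked and 0 is returned so
 * the caller can wait for room.
 */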
static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
        struct spu *spu = ctx->spu;
        struct spu_problem __iomem *prob = spu->problem;
        int ret;

        spin_lock_irq(&spu->register_lock);
        if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
                /* we have space to write wbox_data to */
                out_be32(&prob->spu_mb_W, data);
                ret = 4;
        } else {
                /* make sure we get woken up by the interrupt when space
                   becomes available */
                spu_int_mask_or(spu, 2, 0x10);
                ret = 0;
        }
        spin_unlock_irq(&spu->register_lock);
        return ret;
}

static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify1, data);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
        out_be32(&ctx->spu->problem->signal_notify2, data);
}

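/*
 * Bits 0 and 1 of the privileged spu_cfg_RW register select the
 * behaviour of the two signal notification channels: when set, writes
 * are OR-ed into the notification register, otherwise they overwrite
 * it.  The *_type_set/get helpers below toggle and report these bits.
 */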
static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 1;
        else
                tmp &= ~1;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
        struct spu *spu = ctx->spu;
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 tmp;

        spin_lock_irq(&spu->register_lock);
        tmp = in_be64(&priv2->spu_cfg_RW);
        if (val)
                tmp |= 2;
        else
                tmp &= ~2;
        out_be64(&priv2->spu_cfg_RW, tmp);
        spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
        return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}

static u32 spu_hw_npc_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
        out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
        return ctx->spu->local_store;
}

static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}

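/*
 * Start or stop the SPU through the run control register.  Entering
 * isolated mode additionally requires a write to the privileged
 * control register before the run request is issued.
 */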
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
        spin_lock_irq(&ctx->spu->register_lock);
        if (val & SPU_RUNCNTL_ISOLATE)
                out_be64(&ctx->spu->priv2->spu_privcntl_RW, 4LL);
        out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
        spin_unlock_irq(&ctx->spu->register_lock);
}

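/*
 * The master run control bit in MFC state register 1 gates whether the
 * SPU may run at all; master_start/master_stop toggle it without
 * touching the per-context run control register.
 */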
static void spu_hw_master_start(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}

static void spu_hw_master_stop(struct spu_context *ctx)
{
        struct spu *spu = ctx->spu;
        u64 sr1;

        spin_lock_irq(&spu->register_lock);
        sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
        spu_mfc_sr1_set(spu, sr1);
        spin_unlock_irq(&spu->register_lock);
}

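/*
 * Start a DMA tag group query.  Only one query can be outstanding in
 * the problem state area, so -EAGAIN is returned while a previous
 * query type is still set.
 */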
static int spu_hw_set_mfc_query(struct spu_context *ctx, u32 mask, u32 mode)
{
        struct spu_problem __iomem *prob = ctx->spu->problem;
        int ret;

        spin_lock_irq(&ctx->spu->register_lock);
        ret = -EAGAIN;
        if (in_be32(&prob->dma_querytype_RW))
                goto out;
        ret = 0;
        out_be32(&prob->dma_querymask_RW, mask);
        out_be32(&prob->dma_querytype_RW, mode);
out:
        spin_unlock_irq(&ctx->spu->register_lock);
        return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
        return in_be32(&ctx->spu->problem->dma_qstatus_R);
}

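/*
 * Issue a DMA command through the problem state MFC command queue.
 * The 32-bit register pair packs size/tag and class/cmd; reading the
 * class/cmd register back returns the command completion code, which
 * is mapped to 0 on success, -EAGAIN when the command should be
 * retried, and -EINVAL otherwise.
 */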
static int spu_hw_send_mfc_command(struct spu_context *ctx,
                                        struct mfc_dma_command *cmd)
{
        u32 status;
        struct spu_problem __iomem *prob = ctx->spu->problem;

        spin_lock_irq(&ctx->spu->register_lock);
        out_be32(&prob->mfc_lsa_W, cmd->lsa);
        out_be64(&prob->mfc_ea_W, cmd->ea);
        out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
                                cmd->size << 16 | cmd->tag);
        out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
                                cmd->class << 16 | cmd->cmd);
        status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
        spin_unlock_irq(&ctx->spu->register_lock);

        switch (status & 0xffff) {
        case 0:
                return 0;
        case 2:
                return -EAGAIN;
        default:
                return -EINVAL;
        }
}

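/*
 * Restart a suspended MFC DMA queue.  Skipped while a context switch
 * is pending so we do not interfere with the save/restore sequence,
 * which manages MFC suspension itself.
 */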
static void spu_hw_restart_dma(struct spu_context *ctx)
{
        struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;

        if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

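/*
 * Context operations used while the context owns a physical SPU; the
 * equivalent operations on the saved register image live in
 * backing_ops.c.
 */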
struct spu_context_ops spu_hw_ops = {
        .mbox_read = spu_hw_mbox_read,
        .mbox_stat_read = spu_hw_mbox_stat_read,
        .mbox_stat_poll = spu_hw_mbox_stat_poll,
        .ibox_read = spu_hw_ibox_read,
        .wbox_write = spu_hw_wbox_write,
        .signal1_write = spu_hw_signal1_write,
        .signal2_write = spu_hw_signal2_write,
        .signal1_type_set = spu_hw_signal1_type_set,
        .signal1_type_get = spu_hw_signal1_type_get,
        .signal2_type_set = spu_hw_signal2_type_set,
        .signal2_type_get = spu_hw_signal2_type_get,
        .npc_read = spu_hw_npc_read,
        .npc_write = spu_hw_npc_write,
        .status_read = spu_hw_status_read,
        .get_ls = spu_hw_get_ls,
        .runcntl_read = spu_hw_runcntl_read,
        .runcntl_write = spu_hw_runcntl_write,
        .master_start = spu_hw_master_start,
        .master_stop = spu_hw_master_stop,
        .set_mfc_query = spu_hw_set_mfc_query,
        .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
        .get_mfc_free_elements = spu_hw_get_mfc_free_elements,
        .send_mfc_command = spu_hw_send_mfc_command,
        .restart_dma = spu_hw_restart_dma,
};