1 /*
2 * I/O Processor (IOP) management
3 * Written and (C) 1999 by Joshua M. Thompson (funaho@jurai.org)
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice and this list of conditions.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice and this list of conditions in the documentation and/or other
12 * materials provided with the distribution.
13 */
14
15 /*
16 * The IOP chips are used in the IIfx and some Quadras (900, 950) to manage
17 * serial and ADB. They are actually a 6502 processor and some glue logic.
18 *
19 * 990429 (jmt) - Initial implementation, just enough to knock the SCC IOP
20 * into compatible mode so nobody has to fiddle with the
21 * Serial Switch control panel anymore.
22 * 990603 (jmt) - Added code to grab the correct ISM IOP interrupt for OSS
23 * and non-OSS machines (at least I hope it's correct on a
24 * non-OSS machine -- someone with a Q900 or Q950 needs to
25 * check this.)
26 * 990605 (jmt) - Rearranged things a bit wrt IOP detection; iop_present is
27 * gone, IOP base addresses are now in an array and the
28 * globally-visible functions take an IOP number instead of an
29 * actual base address.
30 * 990610 (jmt) - Finished the message passing framework and it seems to work.
31 * Sending _definitely_ works; my adb-bus.c mods can send
32 * messages and receive the MSG_COMPLETED status back from the
33 * IOP. The trick now is figuring out the message formats.
34 * 990611 (jmt) - More cleanups. Fixed problem where unclaimed messages on a
35 * receive channel were never properly acknowledged. Bracketed
36 * the remaining debug printk's with #ifdef's and disabled
37 * debugging. I can now type on the console.
38 * 990612 (jmt) - Copyright notice added. Reworked the way replies are handled.
39 * It turns out that replies are placed back in the send buffer
40 * for that channel; messages on the receive channels are always
41 * unsolicited messages from the IOP (and our replies to them
42 * should go back in the receive channel.) Also added tracking
43 * of device names to the listener functions ala the interrupt
44 * handlers.
45 * 990729 (jmt) - Added passing of pt_regs structure to IOP handlers. This is
46 * used by the new unified ADB driver.
47 *
48 * TODO:
49 *
50 * o The SCC IOP has to be placed in bypass mode before the serial console
51 * gets initialized. iop_init() would be one place to do that. Or the
52 * bootloader could do that. For now, the Serial Switch control panel
53 * is needed for that -- contrary to the changelog above.
54 * o Something should be periodically checking iop_alive() to make sure the
55 * IOP hasn't died.
56 * o Some of the IOP manager routines need better error checking and
57 * return codes. Nothing major, just prettying up.
58 */
59
60 /*
61 * -----------------------
62 * IOP Message Passing 101
63 * -----------------------
64 *
65 * The host talks to the IOPs using a rather simple message-passing scheme via
66 * a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
67 * channel is connected to a specific software driver on the IOP. For example
68 * on the SCC IOP there is one channel for each serial port. Each channel has
69 * an incoming and an outgoing message queue with a depth of one.
70 *
71 * A message is 32 bytes plus a state byte for the channel (MSG_IDLE, MSG_NEW,
72 * MSG_RCVD, MSG_COMPLETE). To send a message you copy the message into the
73 * buffer, set the state to MSG_NEW and signal the IOP by setting the IRQ flag
74 * in the IOP control register to 1. The IOP will move the state to MSG_RCVD when it
75 * receives the message and then to MSG_COMPLETE when the message processing
76 * has completed. It is the host's responsibility at that point to read the
77 * reply back out of the send channel buffer and reset the channel state back
78 * to MSG_IDLE.
79 *
80 * To receive messages from the IOP the same procedure is used except the roles
81 * are reversed. That is, the IOP puts a message in the channel with a state of
82 * MSG_NEW, and the host receives the message and moves its state to MSG_RCVD
83 * and then to MSG_COMPLETE when processing is completed and the reply (if any)
84 * has been placed back in the receive channel. The IOP will then reset the
85 * channel state to MSG_IDLE.
86 *
87 * Two sets of host interrupts are provided, INT0 and INT1. Both appear on one
88 * interrupt level; they are distinguished by a pair of bits in the IOP status
89 * register. The IOP will raise INT0 when one or more messages in the send
90 * channels have gone to the MSG_COMPLETE state and it will raise INT1 when one
91 * or more messages on the receive channels have gone to the MSG_NEW state.
92 *
93 * Since each channel handles only one message we have to implement a small
94 * interrupt-driven queue on our end. Messages to be sent are placed on the
95 * queue for sending and contain a pointer to an optional callback function.
96 * The handler for a message is called when the message state goes to
97 * MSG_COMPLETE.
98 *
99 * For receiving messages we maintain a list of handler functions to call when
100 * a message is received on that IOP/channel combination. The handlers are
101 * called much like an interrupt handler and are passed a copy of the message
102 * from the IOP. The message state will be in MSG_RCVD while the handler runs;
103 * it is the handler's responsibility to call iop_complete_message() when
104 * finished; this function moves the message state to MSG_COMPLETE and signals
105 * the IOP. This two-step process is provided to allow the handler to defer
106 * message processing to a bottom-half handler if the processing will take
107 * a significant amount of time (handlers are called at interrupt time so they
108 * should execute quickly.)
109 */
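/*
 * As a concrete illustration of the receive path described above, a driver
 * registers a listener for its channel and completes each unsolicited message
 * from its handler. This is only a sketch: the channel number, handler name
 * and device name are hypothetical, not taken from this file.
 *
 *	static void example_listener(struct iop_msg *msg)
 *	{
 *		// msg->message holds the incoming bytes; the channel state
 *		// is MSG_RCVD while this handler runs
 *		memset(msg->reply, 0, IOP_MSG_LEN);	// reply payload, if any
 *		iop_complete_message(msg);	// -> MSG_COMPLETE, signals the IOP
 *	}
 *
 *	// at driver initialization time:
 *	if (iop_listen(IOP_NUM_ISM, 2, example_listener, "example") < 0)
 *		pr_err("example: channel busy or invalid\n");
 */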
110
111 #include <linux/types.h>
112 #include <linux/kernel.h>
113 #include <linux/mm.h>
114 #include <linux/delay.h>
115 #include <linux/init.h>
116 #include <linux/interrupt.h>
117
118 #include <asm/macintosh.h>
119 #include <asm/macints.h>
120 #include <asm/mac_iop.h>
121
122 #ifdef DEBUG
123 #define iop_pr_debug(fmt, ...) \
124 printk(KERN_DEBUG "%s: " fmt, __func__, ##__VA_ARGS__)
125 #define iop_pr_cont(fmt, ...) \
126 printk(KERN_CONT fmt, ##__VA_ARGS__)
127 #else
128 #define iop_pr_debug(fmt, ...) \
129 no_printk(KERN_DEBUG "%s: " fmt, __func__, ##__VA_ARGS__)
130 #define iop_pr_cont(fmt, ...) \
131 no_printk(KERN_CONT fmt, ##__VA_ARGS__)
132 #endif
133
134 /* Non-zero if the IOPs are present */
135
136 int iop_scc_present, iop_ism_present;
137
138 /* structure for tracking channel listeners */
139
140 struct listener {
141 const char *devname;
142 void (*handler)(struct iop_msg *);
143 };
144
145 /*
146 * IOP structures for the two IOPs
147 *
148 * The SCC IOP controls both serial ports (A and B) as its two functions.
149 * The ISM IOP controls the SWIM (floppy drive) and ADB.
150 */
151
152 static volatile struct mac_iop *iop_base[NUM_IOPS];
153
154 /*
155 * IOP message queues
156 */
157
158 static struct iop_msg iop_msg_pool[NUM_IOP_MSGS];
159 static struct iop_msg *iop_send_queue[NUM_IOPS][NUM_IOP_CHAN];
160 static struct listener iop_listeners[NUM_IOPS][NUM_IOP_CHAN];
161
162 irqreturn_t iop_ism_irq(int, void *);
163
164 /*
165 * Private access functions
166 */
167
168 static __inline__ void iop_loadaddr(volatile struct mac_iop *iop, __u16 addr)
169 {
170 iop->ram_addr_lo = addr;
171 iop->ram_addr_hi = addr >> 8;
172 }
173
174 static __inline__ __u8 iop_readb(volatile struct mac_iop *iop, __u16 addr)
175 {
176 iop->ram_addr_lo = addr;
177 iop->ram_addr_hi = addr >> 8;
178 return iop->ram_data;
179 }
180
181 static __inline__ void iop_writeb(volatile struct mac_iop *iop, __u16 addr, __u8 data)
182 {
183 iop->ram_addr_lo = addr;
184 iop->ram_addr_hi = addr >> 8;
185 iop->ram_data = data;
186 }
187
188 static __inline__ void iop_stop(volatile struct mac_iop *iop)
189 {
190 iop->status_ctrl = IOP_AUTOINC;
191 }
192
193 static __inline__ void iop_start(volatile struct mac_iop *iop)
194 {
195 iop->status_ctrl = IOP_RUN | IOP_AUTOINC;
196 }
197
198 static __inline__ void iop_interrupt(volatile struct mac_iop *iop)
199 {
200 iop->status_ctrl = IOP_IRQ | IOP_RUN | IOP_AUTOINC;
201 }
202
203 static int iop_alive(volatile struct mac_iop *iop)
204 {
205 int retval;
206
207 retval = (iop_readb(iop, IOP_ADDR_ALIVE) == 0xFF);
208 iop_writeb(iop, IOP_ADDR_ALIVE, 0);
209 return retval;
210 }
211
212 static struct iop_msg *iop_get_unused_msg(void)
213 {
214 int i;
215 unsigned long flags;
216
217 local_irq_save(flags);
218
219 for (i = 0 ; i < NUM_IOP_MSGS ; i++) {
220 if (iop_msg_pool[i].status == IOP_MSGSTATUS_UNUSED) {
221 iop_msg_pool[i].status = IOP_MSGSTATUS_WAITING;
222 local_irq_restore(flags);
223 return &iop_msg_pool[i];
224 }
225 }
226
227 local_irq_restore(flags);
228 return NULL;
229 }
230
231 /*
232 * Initialize the IOPs, if present.
233 */
234
235 void __init iop_init(void)
236 {
237 int i;
238
239 if (macintosh_config->scc_type == MAC_SCC_IOP) {
240 if (macintosh_config->ident == MAC_MODEL_IIFX)
241 iop_base[IOP_NUM_SCC] = (struct mac_iop *)SCC_IOP_BASE_IIFX;
242 else
243 iop_base[IOP_NUM_SCC] = (struct mac_iop *)SCC_IOP_BASE_QUADRA;
244 iop_scc_present = 1;
245 pr_debug("SCC IOP detected at %p\n", iop_base[IOP_NUM_SCC]);
246 }
247 if (macintosh_config->adb_type == MAC_ADB_IOP) {
248 if (macintosh_config->ident == MAC_MODEL_IIFX)
249 iop_base[IOP_NUM_ISM] = (struct mac_iop *)ISM_IOP_BASE_IIFX;
250 else
251 iop_base[IOP_NUM_ISM] = (struct mac_iop *)ISM_IOP_BASE_QUADRA;
252 iop_ism_present = 1;
253 pr_debug("ISM IOP detected at %p\n", iop_base[IOP_NUM_ISM]);
254
255 iop_stop(iop_base[IOP_NUM_ISM]);
256 iop_start(iop_base[IOP_NUM_ISM]);
257 iop_alive(iop_base[IOP_NUM_ISM]); /* clears the alive flag */
258 }
259
260 /* Make the whole pool available and empty the queues */
261
262 for (i = 0 ; i < NUM_IOP_MSGS ; i++) {
263 iop_msg_pool[i].status = IOP_MSGSTATUS_UNUSED;
264 }
265
266 for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
267 iop_send_queue[IOP_NUM_SCC][i] = NULL;
268 iop_send_queue[IOP_NUM_ISM][i] = NULL;
269 iop_listeners[IOP_NUM_SCC][i].devname = NULL;
270 iop_listeners[IOP_NUM_SCC][i].handler = NULL;
271 iop_listeners[IOP_NUM_ISM][i].devname = NULL;
272 iop_listeners[IOP_NUM_ISM][i].handler = NULL;
273 }
274 }
275
276 /*
277 * Register the interrupt handler for the IOPs.
278 */
279
280 void __init iop_register_interrupts(void)
281 {
282 if (iop_ism_present) {
283 if (macintosh_config->ident == MAC_MODEL_IIFX) {
284 if (request_irq(IRQ_MAC_ADB, iop_ism_irq, 0,
285 "ISM IOP", (void *)IOP_NUM_ISM))
286 pr_err("Couldn't register ISM IOP interrupt\n");
287 } else {
288 if (request_irq(IRQ_VIA2_0, iop_ism_irq, 0, "ISM IOP",
289 (void *)IOP_NUM_ISM))
290 pr_err("Couldn't register ISM IOP interrupt\n");
291 }
292 if (!iop_alive(iop_base[IOP_NUM_ISM])) {
293 pr_warn("IOP: oh my god, they killed the ISM IOP!\n");
294 } else {
295 pr_warn("IOP: the ISM IOP seems to be alive.\n");
296 }
297 }
298 }
299
300 /*
301 * Register or unregister a listener for a specific IOP and channel
302 *
303 * If the handler pointer is NULL the current listener (if any) is
304 * unregistered. Otherwise the new listener is registered provided
305 * there is no existing listener registered.
306 */
307
308 int iop_listen(uint iop_num, uint chan,
309 void (*handler)(struct iop_msg *),
310 const char *devname)
311 {
312 if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return -EINVAL;
313 if (chan >= NUM_IOP_CHAN) return -EINVAL;
314 if (iop_listeners[iop_num][chan].handler && handler) return -EINVAL;
315 iop_listeners[iop_num][chan].devname = devname;
316 iop_listeners[iop_num][chan].handler = handler;
317 return 0;
318 }
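/*
 * For example, a driver that registered a listener would later unregister it
 * by passing a NULL handler (sketch; the channel number is arbitrary):
 *
 *	iop_listen(IOP_NUM_ISM, 2, NULL, NULL);
 */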
319
320 /*
321 * Complete reception of a message, which just means copying the reply
322 * into the buffer, setting the channel state to MSG_COMPLETE and
323 * notifying the IOP.
324 */
325
326 void iop_complete_message(struct iop_msg *msg)
327 {
328 int iop_num = msg->iop_num;
329 int chan = msg->channel;
330 int i,offset;
331
332 iop_pr_debug("iop_num %d chan %d reply %*ph\n",
333 msg->iop_num, msg->channel, IOP_MSG_LEN, msg->reply);
334
335 offset = IOP_ADDR_RECV_MSG + (msg->channel * IOP_MSG_LEN);
336
337 for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
338 iop_writeb(iop_base[iop_num], offset, msg->reply[i]);
339 }
340
341 iop_writeb(iop_base[iop_num],
342 IOP_ADDR_RECV_STATE + chan, IOP_MSG_COMPLETE);
343 iop_interrupt(iop_base[msg->iop_num]);
344
345 msg->status = IOP_MSGSTATUS_UNUSED;
346 }
347
348 /*
349 * Actually put a message into a send channel buffer
350 */
351
352 static void iop_do_send(struct iop_msg *msg)
353 {
354 volatile struct mac_iop *iop = iop_base[msg->iop_num];
355 int i,offset;
356
357 iop_pr_debug("iop_num %d chan %d message %*ph\n",
358 msg->iop_num, msg->channel, IOP_MSG_LEN, msg->message);
359
360 offset = IOP_ADDR_SEND_MSG + (msg->channel * IOP_MSG_LEN);
361
362 for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
363 iop_writeb(iop, offset, msg->message[i]);
364 }
365
366 iop_writeb(iop, IOP_ADDR_SEND_STATE + msg->channel, IOP_MSG_NEW);
367
368 iop_interrupt(iop);
369 }
370
371 /*
372 * Handle sending a message on a channel that
373 * has gone into the IOP_MSG_COMPLETE state.
374 */
375
376 static void iop_handle_send(uint iop_num, uint chan)
377 {
378 volatile struct mac_iop *iop = iop_base[iop_num];
379 struct iop_msg *msg;
380 int i,offset;
381
382 iop_writeb(iop, IOP_ADDR_SEND_STATE + chan, IOP_MSG_IDLE);
383
384 if (!(msg = iop_send_queue[iop_num][chan])) return;
385
386 msg->status = IOP_MSGSTATUS_COMPLETE;
387 offset = IOP_ADDR_SEND_MSG + (chan * IOP_MSG_LEN);
388 for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
389 msg->reply[i] = iop_readb(iop, offset);
390 }
391 iop_pr_debug("iop_num %d chan %d reply %*ph\n",
392 iop_num, chan, IOP_MSG_LEN, msg->reply);
393
394 if (msg->handler) (*msg->handler)(msg);
395 msg->status = IOP_MSGSTATUS_UNUSED;
396 msg = msg->next;
397 iop_send_queue[iop_num][chan] = msg;
398 if (msg && iop_readb(iop, IOP_ADDR_SEND_STATE + chan) == IOP_MSG_IDLE)
399 iop_do_send(msg);
400 }
401
402 /*
403 * Handle reception of a message on a channel that has
404 * gone into the IOP_MSG_NEW state.
405 */
406
407 static void iop_handle_recv(uint iop_num, uint chan)
408 {
409 volatile struct mac_iop *iop = iop_base[iop_num];
410 int i,offset;
411 struct iop_msg *msg;
412
413 msg = iop_get_unused_msg();
414 msg->iop_num = iop_num;
415 msg->channel = chan;
416 msg->status = IOP_MSGSTATUS_UNSOL;
417 msg->handler = iop_listeners[iop_num][chan].handler;
418
419 offset = IOP_ADDR_RECV_MSG + (chan * IOP_MSG_LEN);
420
421 for (i = 0 ; i < IOP_MSG_LEN ; i++, offset++) {
422 msg->message[i] = iop_readb(iop, offset);
423 }
424 iop_pr_debug("iop_num %d chan %d message %*ph\n",
425 iop_num, chan, IOP_MSG_LEN, msg->message);
426
427 iop_writeb(iop, IOP_ADDR_RECV_STATE + chan, IOP_MSG_RCVD);
428
429 /* If there is a listener, call it now. Otherwise complete */
430 /* the message ourselves to avoid possible stalls. */
431
432 if (msg->handler) {
433 (*msg->handler)(msg);
434 } else {
435 memset(msg->reply, 0, IOP_MSG_LEN);
436 iop_complete_message(msg);
437 }
438 }
439
440 /*
441 * Send a message
442 *
443 * The message is placed at the end of the send queue. Afterwards if the
444 * channel is idle we force an immediate send of the next message in the
445 * queue.
446 */
447
448 int iop_send_message(uint iop_num, uint chan, void *privdata,
449 uint msg_len, __u8 *msg_data,
450 void (*handler)(struct iop_msg *))
451 {
452 struct iop_msg *msg, *q;
453
454 if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return -EINVAL;
455 if (chan >= NUM_IOP_CHAN) return -EINVAL;
456 if (msg_len > IOP_MSG_LEN) return -EINVAL;
457
458 msg = iop_get_unused_msg();
459 if (!msg) return -ENOMEM;
460
461 msg->next = NULL;
462 msg->status = IOP_MSGSTATUS_WAITING;
463 msg->iop_num = iop_num;
464 msg->channel = chan;
465 msg->caller_priv = privdata;
466 memcpy(msg->message, msg_data, msg_len);
467 msg->handler = handler;
468
469 if (!(q = iop_send_queue[iop_num][chan])) {
470 iop_send_queue[iop_num][chan] = msg;
471 iop_do_send(msg);
472 } else {
473 while (q->next) q = q->next;
474 q->next = msg;
475 }
476
477 return 0;
478 }
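/*
 * A minimal send-path sketch (the channel number, request payload and handler
 * are hypothetical): the completion handler is called at interrupt time once
 * the IOP has set the channel to MSG_COMPLETE, by which point the reply has
 * already been copied into msg->reply by iop_handle_send().
 *
 *	static void example_done(struct iop_msg *msg)
 *	{
 *		// msg->reply holds the IOP's reply; msg->caller_priv is the
 *		// privdata pointer passed to iop_send_message()
 *	}
 *
 *	__u8 req[IOP_MSG_LEN] = { 0x01 };	// made-up request payload
 *
 *	if (iop_send_message(IOP_NUM_ISM, 2, NULL, sizeof(req), req,
 *			     example_done) < 0)
 *		pr_err("example: send failed\n");
 */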
479
480 /*
481 * Upload code to the shared RAM of an IOP.
482 */
483
484 void iop_upload_code(uint iop_num, __u8 *code_start,
485 uint code_len, __u16 shared_ram_start)
486 {
487 if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return;
488
489 iop_loadaddr(iop_base[iop_num], shared_ram_start);
490
491 while (code_len--) {
492 iop_base[iop_num]->ram_data = *code_start++;
493 }
494 }
495
496 /*
497 * Download code from the shared RAM of an IOP.
498 */
499
500 void iop_download_code(uint iop_num, __u8 *code_start,
501 uint code_len, __u16 shared_ram_start)
502 {
503 if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return;
504
505 iop_loadaddr(iop_base[iop_num], shared_ram_start);
506
507 while (code_len--) {
508 *code_start++ = iop_base[iop_num]->ram_data;
509 }
510 }
511
512 /*
513 * Compare the code in the shared RAM of an IOP with a copy in system memory
514 * and return 0 on match or the first nonmatching system memory address on
515 * failure.
516 */
517
518 __u8 *iop_compare_code(uint iop_num, __u8 *code_start,
519 uint code_len, __u16 shared_ram_start)
520 {
521 if ((iop_num >= NUM_IOPS) || !iop_base[iop_num]) return code_start;
522
523 iop_loadaddr(iop_base[iop_num], shared_ram_start);
524
525 while (code_len--) {
526 if (*code_start != iop_base[iop_num]->ram_data) {
527 return code_start;
528 }
529 code_start++;
530 }
531 return (__u8 *) 0;
532 }
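/*
 * Typical upload-and-verify usage (sketch; the image, length and load address
 * are hypothetical):
 *
 *	extern __u8 fw[];		// code image to run on the IOP
 *	uint fw_len = 0x100;
 *	__u16 load_addr = 0x2000;
 *
 *	iop_upload_code(IOP_NUM_ISM, fw, fw_len, load_addr);
 *	if (iop_compare_code(IOP_NUM_ISM, fw, fw_len, load_addr))
 *		pr_err("example: IOP RAM readback mismatch\n");
 */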
533
534 /*
535 * Handle an ISM IOP interrupt
536 */
537
538 irqreturn_t iop_ism_irq(int irq, void *dev_id)
539 {
540 uint iop_num = (uint) dev_id;
541 volatile struct mac_iop *iop = iop_base[iop_num];
542 int i,state;
543 u8 events = iop->status_ctrl & (IOP_INT0 | IOP_INT1);
544
545 do {
546 iop_pr_debug("iop_num %d status %02X\n", iop_num,
547 iop->status_ctrl);
548
549 /* INT0 indicates state change on an outgoing message channel */
550 if (events & IOP_INT0) {
551 iop->status_ctrl = IOP_INT0 | IOP_RUN | IOP_AUTOINC;
552 for (i = 0; i < NUM_IOP_CHAN; i++) {
553 state = iop_readb(iop, IOP_ADDR_SEND_STATE + i);
554 if (state == IOP_MSG_COMPLETE)
555 iop_handle_send(iop_num, i);
556 else if (state != IOP_MSG_IDLE)
557 iop_pr_debug("chan %d send state %02X\n",
558 i, state);
559 }
560 }
561
562 /* INT1 for incoming messages */
563 if (events & IOP_INT1) {
564 iop->status_ctrl = IOP_INT1 | IOP_RUN | IOP_AUTOINC;
565 for (i = 0; i < NUM_IOP_CHAN; i++) {
566 state = iop_readb(iop, IOP_ADDR_RECV_STATE + i);
567 if (state == IOP_MSG_NEW)
568 iop_handle_recv(iop_num, i);
569 else if (state != IOP_MSG_IDLE)
570 iop_pr_debug("chan %d recv state %02X\n",
571 i, state);
572 }
573 }
574
575 events = iop->status_ctrl & (IOP_INT0 | IOP_INT1);
576 } while (events);
577
578 return IRQ_HANDLED;
579 }
580
581 void iop_ism_irq_poll(uint iop_num)
582 {
583 unsigned long flags;
584
585 local_irq_save(flags);
586 iop_ism_irq(0, (void *)iop_num);
587 local_irq_restore(flags);
588 }