1 /* ds.c: Domain Services driver for Logical Domains
3 * Copyright (C) 2007 David S. Miller <davem@davemloft.net>
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/string.h>
11 #include <linux/slab.h>
12 #include <linux/sched.h>
13 #include <linux/delay.h>
14 #include <linux/mutex.h>
15 #include <linux/workqueue.h>
16 #include <linux/cpu.h>
20 #include <asm/power.h>
21 #include <asm/mdesc.h>
24 #include <asm/hvtramp.h>
/* NOTE(review): this file is a fragmented extraction of a kernel driver --
 * interior lines (braces, blank lines, some statements) are missing
 * throughout, and each line carries a stray leading number from the
 * extraction.  Comments below state only what the visible code shows.
 */
/* Driver identity strings; PFX prefixes every printk in this file. */
26 #define DRV_MODULE_NAME "ds"
27 #define PFX DRV_MODULE_NAME ": "
28 #define DRV_MODULE_VERSION "1.0"
29 #define DRV_MODULE_RELDATE "Jul 11, 2007"
/* One-shot banner string, printed from ds_probe() on first probe. */
31 static char version
[] __devinitdata
=
32 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
33 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
34 MODULE_DESCRIPTION("Sun LDOM domain services driver");
35 MODULE_LICENSE("GPL");
36 MODULE_VERSION(DRV_MODULE_VERSION
);
/* Domain Services protocol message types, carried in the ds_msg_tag
 * header of every packet (handshake, service (un)registration).
 */
40 #define DS_INIT_REQ 0x00
41 #define DS_INIT_ACK 0x01
42 #define DS_INIT_NACK 0x02
43 #define DS_REG_REQ 0x03
44 #define DS_REG_ACK 0x04
45 #define DS_REG_NACK 0x05
46 #define DS_UNREG_REQ 0x06
47 #define DS_UNREG_ACK 0x07
48 #define DS_UNREG_NACK 0x08
/* Result codes returned in NACK replies (DS_INV_HDL is used by
 * ds_data() below for data packets with an unknown handle).
 */
57 #define DS_REG_VER_NACK 0x01
58 #define DS_REG_DUP 0x02
59 #define DS_INV_HDL 0x03
60 #define DS_TYPE_UNKNOWN 0x04
/* DS wire-format message fragments.  Every message begins with a
 * struct ds_msg_tag header; most field lists are missing from this
 * extract.
 */
68 struct ds_msg_tag tag
;
69 struct ds_version ver
;
73 struct ds_msg_tag tag
;
78 struct ds_msg_tag tag
;
83 struct ds_msg_tag tag
;
91 struct ds_msg_tag tag
;
97 struct ds_msg_tag tag
;
/* Service unregistration request / ack / nack messages. */
102 struct ds_unreg_req
{
103 struct ds_msg_tag tag
;
107 struct ds_unreg_ack
{
108 struct ds_msg_tag tag
;
112 struct ds_unreg_nack
{
113 struct ds_msg_tag tag
;
118 struct ds_msg_tag tag
;
/* NACK sent by ds_data() when a data packet's handle is unknown. */
122 struct ds_data_nack
{
123 struct ds_msg_tag tag
;
/* Per-service capability: the callback invoked for incoming DS_DATA
 * packets, the service name used during registration, and a
 * registration state (CAP_STATE_* below).
 */
128 struct ds_cap_state
{
/* Data callback; invoked by ds_data() with the channel, this entry,
 * and the packet.  NOTE(review): trailing parameters not visible here.
 */
131 void (*data
)(struct ldc_channel
*lp
,
132 struct ds_cap_state
*cp
,
135 const char *service_id
;
/* Registration state machine values for ds_cap_state (see
 * register_services() and ds_handshake()).
 */
138 #define CAP_STATE_UNKNOWN 0x00
139 #define CAP_STATE_REG_SENT 0x01
140 #define CAP_STATE_REGISTERED 0x02
/* Forward declarations of the per-service data callbacks wired into
 * the ds_states[] table below.
 */
143 static void md_update_data(struct ldc_channel
*lp
, struct ds_cap_state
*cp
,
145 static void domain_shutdown_data(struct ldc_channel
*lp
,
146 struct ds_cap_state
*cp
,
148 static void domain_panic_data(struct ldc_channel
*lp
,
149 struct ds_cap_state
*cp
,
151 static void dr_cpu_data(struct ldc_channel
*lp
,
152 struct ds_cap_state
*cp
,
154 static void ds_pri_data(struct ldc_channel
*lp
,
155 struct ds_cap_state
*cp
,
157 static void ds_var_data(struct ldc_channel
*lp
,
158 struct ds_cap_state
*cp
,
/* Capability table: one entry per domain service this driver speaks.
 * find_cap() indexes it by the top 32 bits of a handle;
 * find_cap_by_string() searches it by service_id.
 * NOTE(review): .data initializers for the last three entries are not
 * visible in this extract.
 */
161 struct ds_cap_state ds_states
[] = {
163 .service_id
= "md-update",
164 .data
= md_update_data
,
167 .service_id
= "domain-shutdown",
168 .data
= domain_shutdown_data
,
171 .service_id
= "domain-panic",
172 .data
= domain_panic_data
,
175 .service_id
= "dr-cpu",
183 .service_id
= "var-config",
187 .service_id
= "var-config-backup",
/* Serializes channel writes and, per the comment further below,
 * protects the dr-cpu work list.
 */
192 static DEFINE_SPINLOCK(ds_lock
);
/* Connection state fragment: the LDC channel pointer plus the
 * handshake-progress values used by ds_handshake()/ds_up().
 */
195 struct ldc_channel
*lp
;
/* hs_state values: DS_INIT_REQ sent / handshake complete. */
197 #define DS_HS_START 0x01
198 #define DS_HS_DONE 0x02
/* Single global connection to the domain-services port. */
204 static struct ds_info
*ds_info
;
206 static struct ds_cap_state
*find_cap(u64 handle
)
208 unsigned int index
= handle
>> 32;
210 if (index
>= ARRAY_SIZE(ds_states
))
212 return &ds_states
[index
];
215 static struct ds_cap_state
*find_cap_by_string(const char *name
)
219 for (i
= 0; i
< ARRAY_SIZE(ds_states
); i
++) {
220 if (strcmp(ds_states
[i
].service_id
, name
))
223 return &ds_states
[i
];
228 static int ds_send(struct ldc_channel
*lp
, void *data
, int len
)
230 int err
, limit
= 1000;
233 while (limit
-- > 0) {
234 err
= ldc_write(lp
, data
, len
);
235 if (!err
|| (err
!= -EAGAIN
))
/* md-update request/response payloads (field lists not visible here). */
243 struct ds_md_update_req
{
247 struct ds_md_update_res
{
/* "md-update" service callback: acknowledge a machine-description
 * update by echoing the request number back with result DS_OK.
 * NOTE(review): called from ds_data() with ds_lock held -- confirm.
 */
252 static void md_update_data(struct ldc_channel
*lp
,
253 struct ds_cap_state
*dp
,
256 struct ds_data
*dpkt
= buf
;
257 struct ds_md_update_req
*rp
;
260 struct ds_md_update_res res
;
/* Request payload immediately follows the ds_data header. */
263 rp
= (struct ds_md_update_req
*) (dpkt
+ 1);
265 printk(KERN_INFO PFX
"Machine description update.\n");
/* Build the DS_DATA reply: echo req_num, report DS_OK. */
267 memset(&pkt
, 0, sizeof(pkt
));
268 pkt
.data
.tag
.type
= DS_DATA
;
269 pkt
.data
.tag
.len
= sizeof(pkt
) - sizeof(struct ds_msg_tag
);
270 pkt
.data
.handle
= dp
->handle
;
271 pkt
.res
.req_num
= rp
->req_num
;
272 pkt
.res
.result
= DS_OK
;
274 ds_send(lp
, &pkt
, sizeof(pkt
));
/* domain-shutdown request/response payloads (fields not visible). */
279 struct ds_shutdown_req
{
284 struct ds_shutdown_res
{
/* "domain-shutdown" service callback: ACK the LDOM manager's shutdown
 * request with DS_OK and an empty reason string.  The action taken
 * after the ACK is not visible in this extract.
 */
290 static void domain_shutdown_data(struct ldc_channel
*lp
,
291 struct ds_cap_state
*dp
,
294 struct ds_data
*dpkt
= buf
;
295 struct ds_shutdown_req
*rp
;
298 struct ds_shutdown_res res
;
/* Request payload immediately follows the ds_data header. */
301 rp
= (struct ds_shutdown_req
*) (dpkt
+ 1);
303 printk(KERN_ALERT PFX
"Shutdown request from "
304 "LDOM manager received.\n");
/* Build the DS_DATA reply: echo req_num, DS_OK, empty reason. */
306 memset(&pkt
, 0, sizeof(pkt
));
307 pkt
.data
.tag
.type
= DS_DATA
;
308 pkt
.data
.tag
.len
= sizeof(pkt
) - sizeof(struct ds_msg_tag
);
309 pkt
.data
.handle
= dp
->handle
;
310 pkt
.res
.req_num
= rp
->req_num
;
311 pkt
.res
.result
= DS_OK
;
312 pkt
.res
.reason
[0] = 0;
314 ds_send(lp
, &pkt
, sizeof(pkt
));
/* domain-panic request/response payloads (fields not visible). */
319 struct ds_panic_req
{
323 struct ds_panic_res
{
/* "domain-panic" service callback: ACK the request with DS_OK and an
 * empty reason, then panic the domain as requested (does not return).
 */
329 static void domain_panic_data(struct ldc_channel
*lp
,
330 struct ds_cap_state
*dp
,
333 struct ds_data
*dpkt
= buf
;
334 struct ds_panic_req
*rp
;
337 struct ds_panic_res res
;
/* Request payload immediately follows the ds_data header. */
340 rp
= (struct ds_panic_req
*) (dpkt
+ 1);
342 printk(KERN_ALERT PFX
"Panic request from "
343 "LDOM manager received.\n");
/* Build the DS_DATA reply before panicking. */
345 memset(&pkt
, 0, sizeof(pkt
));
346 pkt
.data
.tag
.type
= DS_DATA
;
347 pkt
.data
.tag
.len
= sizeof(pkt
) - sizeof(struct ds_msg_tag
);
348 pkt
.data
.handle
= dp
->handle
;
349 pkt
.res
.req_num
= rp
->req_num
;
350 pkt
.res
.result
= DS_OK
;
351 pkt
.res
.reason
[0] = 0;
353 ds_send(lp
, &pkt
, sizeof(pkt
));
/* The ACK above is best-effort; the panic happens regardless. */
355 panic("PANIC requested by LDOM manager.");
/* dr-cpu request types (dispatched on in dr_cpu_process()). */
361 #define DR_CPU_CONFIGURE 0x43
362 #define DR_CPU_UNCONFIGURE 0x55
363 #define DR_CPU_FORCE_UNCONFIGURE 0x46
364 #define DR_CPU_STATUS 0x53
/* dr-cpu response types ('o'k / 'e'rror). */
367 #define DR_CPU_OK 0x6f
368 #define DR_CPU_ERROR 0x65
/* One response entry per CPU, appended after the dr_cpu_tag. */
373 struct dr_cpu_resp_entry
{
/* Per-cpu result codes (resp_entry.result). */
376 #define DR_CPU_RES_OK 0x00
377 #define DR_CPU_RES_FAILURE 0x01
378 #define DR_CPU_RES_BLOCKED 0x02
379 #define DR_CPU_RES_CPU_NOT_RESPONDING 0x03
380 #define DR_CPU_RES_NOT_IN_MD 0x04
/* Per-cpu status values (resp_entry.stat). */
383 #define DR_CPU_STAT_NOT_PRESENT 0x00
384 #define DR_CPU_STAT_UNCONFIGURED 0x01
385 #define DR_CPU_STAT_CONFIGURED 0x02
390 /* XXX Put this in some common place. XXX */
391 static unsigned long kimage_addr_to_ra(void *p
)
393 unsigned long val
= (unsigned long) p
;
395 return kern_base
+ (val
- KERNBASE
);
/* Start CPU @cpu via the sun4v hypervisor.
 *
 * Builds a hvtramp_descr describing the kernel's locked TTE mappings
 * (one or two 4MB mappings depending on "bigkernel") and the per-cpu
 * trap-block fault-info area, then asks the hypervisor to start the
 * cpu at the hv_cpu_startup trampoline with @thread_reg as the initial
 * thread register.  NOTE(review): allocation-failure and hv_err
 * handling lines are missing from this extract.
 */
398 void ldom_startcpu_cpuid(unsigned int cpu
, unsigned long thread_reg
)
400 extern unsigned long sparc64_ttable_tl0
;
401 extern unsigned long kern_locked_tte_data
;
402 extern int bigkernel
;
403 struct hvtramp_descr
*hdesc
;
404 unsigned long trampoline_ra
;
405 struct trap_per_cpu
*tb
;
406 u64 tte_vaddr
, tte_data
;
407 unsigned long hv_err
;
409 hdesc
= kzalloc(sizeof(*hdesc
), GFP_KERNEL
);
411 printk(KERN_ERR PFX
"ldom_startcpu_cpuid: Cannot allocate "
/* Large kernels need a second 4MB locked mapping. */
417 hdesc
->num_mappings
= (bigkernel
? 2 : 1);
419 tb
= &trap_block
[cpu
];
/* Hand the new cpu both VA and RA of its fault-info area. */
422 hdesc
->fault_info_va
= (unsigned long) &tb
->fault_info
;
423 hdesc
->fault_info_pa
= kimage_addr_to_ra(&tb
->fault_info
);
425 hdesc
->thread_reg
= thread_reg
;
/* First locked mapping covers KERNBASE. */
427 tte_vaddr
= (unsigned long) KERNBASE
;
428 tte_data
= kern_locked_tte_data
;
430 hdesc
->maps
[0].vaddr
= tte_vaddr
;
431 hdesc
->maps
[0].tte
= tte_data
;
/* Optional second mapping covers the next 4MB. */
433 tte_vaddr
+= 0x400000;
434 tte_data
+= 0x400000;
435 hdesc
->maps
[1].vaddr
= tte_vaddr
;
436 hdesc
->maps
[1].tte
= tte_data
;
439 trampoline_ra
= kimage_addr_to_ra(hv_cpu_startup
);
/* Hypervisor call: start @cpu at the trampoline with the TL0 trap
 * table.  NOTE(review): remaining arguments / error check not visible.
 */
441 hv_err
= sun4v_cpu_start(cpu
, trampoline_ra
,
442 kimage_addr_to_ra(&sparc64_ttable_tl0
),
446 /* DR cpu requests get queued onto the work list by the
447 * dr_cpu_data() callback. The list is protected by
448 * ds_lock, and processed by dr_cpu_process() in order.
450 static LIST_HEAD(dr_cpu_work_list
);
/* One queued dr-cpu request; the raw packet is copied in after the
 * list head (see dr_cpu_data()).
 */
452 struct dr_cpu_queue_entry
{
453 struct list_head list
;
/* Send a DR_CPU_ERROR response (zero records) echoing the request
 * number of @data.  Caller must hold ds_lock; dr_cpu_send_error()
 * below is the locking wrapper.
 */
457 static void __dr_cpu_send_error(struct ds_cap_state
*cp
, struct ds_data
*data
)
459 struct dr_cpu_tag
*tag
= (struct dr_cpu_tag
*) (data
+ 1);
460 struct ds_info
*dp
= ds_info
;
463 struct dr_cpu_tag tag
;
467 memset(&pkt
, 0, sizeof(pkt
));
468 pkt
.data
.tag
.type
= DS_DATA
;
469 pkt
.data
.handle
= cp
->handle
;
470 pkt
.tag
.req_num
= tag
->req_num
;
471 pkt
.tag
.type
= DR_CPU_ERROR
;
472 pkt
.tag
.num_records
= 0;
/* Error responses carry no per-cpu entries: header + tag only. */
474 msg_len
= (sizeof(struct ds_data
) +
475 sizeof(struct dr_cpu_tag
));
477 pkt
.data
.tag
.len
= msg_len
- sizeof(struct ds_msg_tag
);
479 ds_send(dp
->lp
, &pkt
, msg_len
);
/* Locked wrapper around __dr_cpu_send_error(). */
482 static void dr_cpu_send_error(struct ds_cap_state
*cp
, struct ds_data
*data
)
486 spin_lock_irqsave(&ds_lock
, flags
);
487 __dr_cpu_send_error(cp
, data
);
488 spin_unlock_irqrestore(&ds_lock
, flags
);
/* Marker written over duplicate / discarded cpu-id slots. */
491 #define CPU_SENTINEL 0xffffffff
/* Replace duplicate cpu ids in @list (length @num_ents) with
 * CPU_SENTINEL so later passes skip them: for each live entry, any
 * later entry holding the same cpu id is overwritten.
 */
493 static void purge_dups(u32
*list
, u32 num_ents
)
497 for (i
= 0; i
< num_ents
; i
++) {
/* Already purged -- skip. */
501 if (cpu
== CPU_SENTINEL
)
504 for (j
= i
+ 1; j
< num_ents
; j
++) {
506 list
[j
] = CPU_SENTINEL
;
511 static int dr_cpu_size_response(int ncpus
)
513 return (sizeof(struct ds_data
) +
514 sizeof(struct dr_cpu_tag
) +
515 (sizeof(struct dr_cpu_resp_entry
) * ncpus
));
/* Initialize a dr-cpu response buffer: fill in the DS data header,
 * a DR_CPU_OK tag echoing @req_num with @ncpus records, and one entry
 * per cpu in @mask preset to DR_CPU_RES_OK / @default_stat.
 */
518 static void dr_cpu_init_response(struct ds_data
*resp
, u64 req_num
,
519 u64 handle
, int resp_len
, int ncpus
,
520 cpumask_t
*mask
, u32 default_stat
)
522 struct dr_cpu_resp_entry
*ent
;
523 struct dr_cpu_tag
*tag
;
/* Layout: ds_data header, then tag, then the entry array. */
526 tag
= (struct dr_cpu_tag
*) (resp
+ 1);
527 ent
= (struct dr_cpu_resp_entry
*) (tag
+ 1);
529 resp
->tag
.type
= DS_DATA
;
530 resp
->tag
.len
= resp_len
- sizeof(struct ds_msg_tag
);
531 resp
->handle
= handle
;
532 tag
->req_num
= req_num
;
533 tag
->type
= DR_CPU_OK
;
534 tag
->num_records
= ncpus
;
/* Optimistically mark every requested cpu OK with the default
 * status; dr_cpu_mark() overrides failures later.
 */
537 for_each_cpu_mask(cpu
, *mask
) {
539 ent
[i
].result
= DR_CPU_RES_OK
;
540 ent
[i
].stat
= default_stat
;
/* Locate the response entry for @cpu in @resp (which holds @ncpus
 * entries) and override its fields.  NOTE(review): the parameters
 * carrying the new result/stat values are missing from this extract.
 */
546 static void dr_cpu_mark(struct ds_data
*resp
, int cpu
, int ncpus
,
549 struct dr_cpu_resp_entry
*ent
;
550 struct dr_cpu_tag
*tag
;
/* Same layout as dr_cpu_init_response(): header, tag, entries. */
553 tag
= (struct dr_cpu_tag
*) (resp
+ 1);
554 ent
= (struct dr_cpu_resp_entry
*) (tag
+ 1);
556 for (i
= 0; i
< ncpus
; i
++) {
557 if (ent
[i
].cpu
!= cpu
)
/* Handle a DR_CPU_CONFIGURE request for the cpus in @mask.
 *
 * Allocates and pre-fills a response (all cpus CONFIGURED), pulls the
 * new cpus' data from the machine description, starts each cpu, marks
 * any failures back to UNCONFIGURED, then sends the response under
 * ds_lock.  NOTE(review): the cpu_up()/error-path lines are missing
 * from this extract.
 */
565 static int dr_cpu_configure(struct ds_cap_state
*cp
, u64 req_num
,
568 struct ds_data
*resp
;
569 int resp_len
, ncpus
, cpu
;
572 ncpus
= cpus_weight(*mask
);
573 resp_len
= dr_cpu_size_response(ncpus
);
574 resp
= kzalloc(resp_len
, GFP_KERNEL
);
/* Optimistic response: every cpu reported CONFIGURED. */
578 dr_cpu_init_response(resp
, req_num
, cp
->handle
,
579 resp_len
, ncpus
, mask
,
580 DR_CPU_STAT_CONFIGURED
);
582 mdesc_fill_in_cpu_data(*mask
);
584 for_each_cpu_mask(cpu
, *mask
) {
587 printk(KERN_INFO PFX
"Starting cpu %d...\n", cpu
);
/* On failure, downgrade this cpu's entry to UNCONFIGURED. */
590 dr_cpu_mark(resp
, cpu
, ncpus
,
592 DR_CPU_STAT_UNCONFIGURED
);
595 spin_lock_irqsave(&ds_lock
, flags
);
596 ds_send(ds_info
->lp
, resp
, resp_len
);
597 spin_unlock_irqrestore(&ds_lock
, flags
);
/* Handle a DR_CPU_UNCONFIGURE request for the cpus in @mask: build a
 * response reporting the cpus as UNCONFIGURED.  NOTE(review): the
 * remainder of the function (send / return value) is missing from
 * this extract.
 */
604 static int dr_cpu_unconfigure(struct ds_cap_state
*cp
, u64 req_num
,
607 struct ds_data
*resp
;
610 ncpus
= cpus_weight(*mask
);
611 resp_len
= dr_cpu_size_response(ncpus
);
612 resp
= kzalloc(resp_len
, GFP_KERNEL
);
616 dr_cpu_init_response(resp
, req_num
, cp
->handle
,
617 resp_len
, ncpus
, mask
,
618 DR_CPU_STAT_UNCONFIGURED
);
/* Workqueue handler: drain queued dr-cpu requests and execute them.
 *
 * Splices the pending requests off dr_cpu_work_list under ds_lock,
 * then for each request validates its type, de-duplicates the cpu id
 * list, builds a cpumask, and dispatches to dr_cpu_configure() or
 * dr_cpu_unconfigure(); a failure is answered with DR_CPU_ERROR.
 */
625 static void dr_cpu_process(struct work_struct
*work
)
627 struct dr_cpu_queue_entry
*qp
, *tmp
;
628 struct ds_cap_state
*cp
;
633 cp
= find_cap_by_string("dr-cpu");
/* Take the whole pending list in one locked splice. */
635 spin_lock_irqsave(&ds_lock
, flags
);
636 list_splice(&dr_cpu_work_list
, &todo
);
637 spin_unlock_irqrestore(&ds_lock
, flags
);
639 list_for_each_entry_safe(qp
, tmp
, &todo
, list
) {
/* The raw packet lives inline after the list head. */
640 struct ds_data
*data
= (struct ds_data
*) qp
->req
;
641 struct dr_cpu_tag
*tag
= (struct dr_cpu_tag
*) (data
+ 1);
642 u32
*cpu_list
= (u32
*) (tag
+ 1);
643 u64 req_num
= tag
->req_num
;
/* Only these request types are accepted. */
648 case DR_CPU_CONFIGURE
:
649 case DR_CPU_UNCONFIGURE
:
650 case DR_CPU_FORCE_UNCONFIGURE
:
/* Unknown type: reply with DR_CPU_ERROR. */
654 dr_cpu_send_error(cp
, data
);
658 purge_dups(cpu_list
, tag
->num_records
);
/* Build the cpumask, skipping purged and out-of-range ids. */
661 for (i
= 0; i
< tag
->num_records
; i
++) {
662 if (cpu_list
[i
] == CPU_SENTINEL
)
665 if (cpu_list
[i
] < NR_CPUS
)
666 cpu_set(cpu_list
[i
], mask
);
669 if (tag
->type
== DR_CPU_CONFIGURE
)
670 err
= dr_cpu_configure(cp
, req_num
, &mask
);
672 err
= dr_cpu_unconfigure(cp
, req_num
, &mask
);
675 dr_cpu_send_error(cp
, data
);
/* Work item scheduled by dr_cpu_data() below. */
683 static DECLARE_WORK(dr_cpu_work
, dr_cpu_process
);
/* "dr-cpu" service callback: copy the request and queue it for
 * dr_cpu_process().  Uses GFP_ATOMIC -- NOTE(review): presumably
 * because it runs in the ds_event() callback with ds_lock held;
 * confirm.  On allocation failure the request is answered with
 * DR_CPU_ERROR immediately.
 */
685 static void dr_cpu_data(struct ldc_channel
*lp
,
686 struct ds_cap_state
*dp
,
689 struct dr_cpu_queue_entry
*qp
;
690 struct ds_data
*dpkt
= buf
;
691 struct dr_cpu_tag
*rp
;
693 rp
= (struct dr_cpu_tag
*) (dpkt
+ 1);
/* Entry holds the list head plus an inline copy of the packet. */
695 qp
= kmalloc(sizeof(struct dr_cpu_queue_entry
) + len
, GFP_ATOMIC
);
697 struct ds_cap_state
*cp
;
699 cp
= find_cap_by_string("dr-cpu");
/* Lock-free variant: caller already holds ds_lock. */
700 __dr_cpu_send_error(cp
, dpkt
);
702 memcpy(&qp
->req
, buf
, len
);
703 list_add_tail(&qp
->list
, &dr_cpu_work_list
);
704 schedule_work(&dr_cpu_work
);
/* PRI (Platform Resource Inventory) message types. */
711 #define DS_PRI_REQUEST 0x00
712 #define DS_PRI_DATA 0x01
713 #define DS_PRI_UPDATE 0x02
/* "pri" service callback: currently only logs the incoming request;
 * no response is sent in the visible code.
 */
716 static void ds_pri_data(struct ldc_channel
*lp
,
717 struct ds_cap_state
*dp
,
720 struct ds_data
*dpkt
= buf
;
721 struct ds_pri_msg
*rp
;
723 rp
= (struct ds_pri_msg
*) (dpkt
+ 1);
725 printk(KERN_INFO PFX
"PRI REQ [%lx:%lx], len=%d\n",
726 rp
->req_num
, rp
->type
, len
);
/* var-config message types. */
731 #define DS_VAR_SET_REQ 0x00
732 #define DS_VAR_DELETE_REQ 0x01
733 #define DS_VAR_SET_RESP 0x02
734 #define DS_VAR_DELETE_RESP 0x03
/* Set request: header followed by "name\0value\0" packed inline. */
737 struct ds_var_set_msg
{
738 struct ds_var_hdr hdr
;
739 char name_and_value
[0];
742 struct ds_var_delete_msg
{
743 struct ds_var_hdr hdr
;
748 struct ds_var_hdr hdr
;
/* Result codes carried in var-config responses. */
750 #define DS_VAR_SUCCESS 0x00
751 #define DS_VAR_NO_SPACE 0x01
752 #define DS_VAR_INVALID_VAR 0x02
753 #define DS_VAR_INVALID_VAL 0x03
754 #define DS_VAR_NOT_PRESENT 0x04
/* ds_var_mutex serializes ldom_set_var() callers; ds_var_doorbell /
 * ds_var_response form the completion signal filled in by
 * ds_var_data().
 */
757 static DEFINE_MUTEX(ds_var_mutex
);
758 static int ds_var_doorbell
;
759 static int ds_var_response
;
/* "var-config" service callback: record the result of a pending
 * set/delete request for the waiter in ldom_set_var().
 * NOTE(review): the line setting ds_var_doorbell is missing from this
 * extract -- presumably it is set after the response is stored.
 */
761 static void ds_var_data(struct ldc_channel
*lp
,
762 struct ds_cap_state
*dp
,
765 struct ds_data
*dpkt
= buf
;
766 struct ds_var_resp
*rp
;
768 rp
= (struct ds_var_resp
*) (dpkt
+ 1);
/* Ignore anything that is not a set/delete response. */
770 if (rp
->hdr
.type
!= DS_VAR_SET_RESP
&&
771 rp
->hdr
.type
!= DS_VAR_DELETE_RESP
)
774 ds_var_response
= rp
->result
;
/* Set an LDOM variable @var to @value via the var-config service
 * (falling back to var-config-backup if the primary is not
 * registered).
 *
 * Builds a DS_VAR_SET_REQ whose payload is "var\0value\0" padded to a
 * 4-byte boundary, sends it under ds_lock, then polls the
 * ds_var_doorbell/ds_var_response pair (serialized by ds_var_mutex)
 * and logs any failure.
 */
779 void ldom_set_var(const char *var
, const char *value
)
781 struct ds_info
*dp
= ds_info
;
782 struct ds_cap_state
*cp
;
784 cp
= find_cap_by_string("var-config");
/* Fall back to the backup service if the primary never registered. */
785 if (cp
->state
!= CAP_STATE_REGISTERED
)
786 cp
= find_cap_by_string("var-config-backup");
788 if (cp
->state
== CAP_STATE_REGISTERED
) {
792 struct ds_var_set_msg msg
;
800 memset(&pkt
, 0, sizeof(pkt
));
801 pkt
.header
.data
.tag
.type
= DS_DATA
;
802 pkt
.header
.data
.handle
= cp
->handle
;
803 pkt
.header
.msg
.hdr
.type
= DS_VAR_SET_REQ
;
/* Pack "var\0value\0" into the flexible payload. */
804 base
= p
= &pkt
.header
.msg
.name_and_value
[0];
806 p
+= strlen(var
) + 1;
808 p
+= strlen(value
) + 1;
810 msg_len
= (sizeof(struct ds_data
) +
811 sizeof(struct ds_var_set_msg
) +
/* Round the message length up to a 4-byte multiple. */
813 msg_len
= (msg_len
+ 3) & ~3;
814 pkt
.header
.data
.tag
.len
= msg_len
- sizeof(struct ds_msg_tag
);
/* One in-flight variable operation at a time. */
816 mutex_lock(&ds_var_mutex
);
818 spin_lock_irqsave(&ds_lock
, flags
);
/* Reset the completion state before sending. */
820 ds_var_response
= -1;
822 ds_send(dp
->lp
, &pkt
, msg_len
);
823 spin_unlock_irqrestore(&ds_lock
, flags
);
/* Poll for the doorbell rung by ds_var_data().  NOTE(review):
 * the loop body / timeout lines are missing from this extract.
 */
826 while (ds_var_doorbell
== 0) {
833 mutex_unlock(&ds_var_mutex
);
/* No doorbell (timeout) or a non-success response is an error. */
835 if (ds_var_doorbell
== 0 ||
836 ds_var_response
!= DS_VAR_SUCCESS
)
837 printk(KERN_ERR PFX
"var-config [%s:%s] "
838 "failed, response(%d).\n",
842 printk(KERN_ERR PFX
"var-config not registered so "
843 "could not set (%s) variable to (%s).\n",
/* Record @boot_command as the LDOM "reboot-command" variable so the
 * next boot uses it.  An empty/NULL command is ignored.
 * NOTE(review): full_boot_str is a fixed 256-byte buffer filled with
 * unbounded strcpy() -- a boot_command longer than ~250 bytes would
 * overflow it; callers must guarantee the length (worth bounding with
 * snprintf() upstream).
 */
848 void ldom_reboot(const char *boot_command
)
850 /* Don't bother with any of this if the boot_command
853 if (boot_command
&& strlen(boot_command
)) {
854 char full_boot_str
[256];
/* Build "boot <command>". */
856 strcpy(full_boot_str
, "boot ");
857 strcpy(full_boot_str
+ strlen("boot "), boot_command
);
859 ldom_set_var("reboot-command", full_boot_str
);
/* Power off the domain (body not visible in this extract). */
864 void ldom_power_off(void)
/* Reset handling for a dropped DS connection; currently just logs the
 * caller in the visible code.
 */
869 static void ds_conn_reset(struct ds_info
*dp
)
871 printk(KERN_ERR PFX
"ds_conn_reset() from %p\n",
872 __builtin_return_address(0));
/* After the init handshake completes, send a DS_REG_REQ for every
 * capability in ds_states[] that is not yet registered.
 *
 * Each request carries a fresh handle: the table index in the top 32
 * bits (so find_cap() can recover the entry) and a clock-derived
 * count in the low 32 bits.  Successfully sent entries move to
 * CAP_STATE_REG_SENT; the ACK/NACK is handled in ds_handshake().
 */
875 static int register_services(struct ds_info
*dp
)
877 struct ldc_channel
*lp
= dp
->lp
;
880 for (i
= 0; i
< ARRAY_SIZE(ds_states
); i
++) {
882 struct ds_reg_req req
;
885 struct ds_cap_state
*cp
= &ds_states
[i
];
889 if (cp
->state
== CAP_STATE_REGISTERED
)
/* index << 32 | low 32 bits of sched_clock(). */
892 new_count
= sched_clock() & 0xffffffff;
893 cp
->handle
= ((u64
) i
<< 32) | new_count
;
/* Message length includes the service name appended after
 * the fixed request header.
 */
895 msg_len
= (sizeof(struct ds_reg_req
) +
896 strlen(cp
->service_id
));
898 memset(&pbuf
, 0, sizeof(pbuf
));
899 pbuf
.req
.tag
.type
= DS_REG_REQ
;
900 pbuf
.req
.tag
.len
= (msg_len
- sizeof(struct ds_msg_tag
));
901 pbuf
.req
.handle
= cp
->handle
;
904 strcpy(pbuf
.req
.svc_id
, cp
->service_id
);
906 err
= ds_send(lp
, &pbuf
, msg_len
);
908 cp
->state
= CAP_STATE_REG_SENT
;
/* Drive the DS handshake state machine for control packets
 * (tag->type < DS_DATA, see ds_event()).
 *
 * In DS_HS_START only a DS_INIT_ACK is accepted; it completes the
 * handshake and triggers service registration.  Once in DS_HS_DONE,
 * REG ACK/NACK packets update the matching capability's state.
 */
913 static int ds_handshake(struct ds_info
*dp
, struct ds_msg_tag
*pkt
)
916 if (dp
->hs_state
== DS_HS_START
) {
917 if (pkt
->type
!= DS_INIT_ACK
)
920 dp
->hs_state
= DS_HS_DONE
;
922 return register_services(dp
);
/* Anything else is only valid after the handshake finished. */
925 if (dp
->hs_state
!= DS_HS_DONE
)
928 if (pkt
->type
== DS_REG_ACK
) {
929 struct ds_reg_ack
*ap
= (struct ds_reg_ack
*) pkt
;
/* The handle's top 32 bits index ds_states[]. */
930 struct ds_cap_state
*cp
= find_cap(ap
->handle
);
933 printk(KERN_ERR PFX
"REG ACK for unknown handle %lx\n",
937 printk(KERN_INFO PFX
"Registered %s service.\n",
939 cp
->state
= CAP_STATE_REGISTERED
;
940 } else if (pkt
->type
== DS_REG_NACK
) {
941 struct ds_reg_nack
*np
= (struct ds_reg_nack
*) pkt
;
942 struct ds_cap_state
*cp
= find_cap(np
->handle
);
945 printk(KERN_ERR PFX
"REG NACK for "
946 "unknown handle %lx\n",
950 printk(KERN_INFO PFX
"Could not register %s service\n",
/* NACKed services drop back to the unregistered state. */
952 cp
->state
= CAP_STATE_UNKNOWN
;
/* Dispatch an incoming DS_DATA packet to the capability identified by
 * its handle.  Unknown handles are answered with a ds_data_nack
 * carrying DS_INV_HDL; otherwise the service's data callback runs.
 */
962 static int ds_data(struct ds_info
*dp
, struct ds_msg_tag
*pkt
, int len
)
964 struct ds_data
*dpkt
= (struct ds_data
*) pkt
;
965 struct ds_cap_state
*cp
= find_cap(dpkt
->handle
);
/* No matching capability: NACK with DS_INV_HDL. */
968 struct ds_data_nack nack
= {
971 .len
= (sizeof(struct ds_data_nack
) -
972 sizeof(struct ds_msg_tag
)),
974 .handle
= dpkt
->handle
,
975 .result
= DS_INV_HDL
,
978 printk(KERN_ERR PFX
"Data for unknown handle %lu\n",
980 ds_send(dp
->lp
, &nack
, sizeof(nack
));
/* Hand the full packet (and its length) to the service. */
982 cp
->data(dp
->lp
, cp
, dpkt
, len
);
/* Channel-up handler: kick off the DS handshake by sending a
 * DS_INIT_REQ, then mark the connection as waiting for the INIT ACK
 * (DS_HS_START; completed in ds_handshake()).
 */
987 static void ds_up(struct ds_info
*dp
)
989 struct ldc_channel
*lp
= dp
->lp
;
990 struct ds_ver_req req
;
993 req
.tag
.type
= DS_INIT_REQ
;
994 req
.tag
.len
= sizeof(req
) - sizeof(struct ds_msg_tag
);
998 err
= ds_send(lp
, &req
, sizeof(req
));
1000 dp
->hs_state
= DS_HS_START
;
/* LDC event callback registered in ds_probe() (via ds_cfg).
 *
 * Runs with ds_lock held for the duration.  LDC_EVENT_UP starts the
 * handshake; LDC_EVENT_DATA_READY reads complete packets -- first the
 * ds_msg_tag header, then tag->len bytes of payload -- and dispatches
 * them to ds_handshake() (control) or ds_data() (service data).
 * -ECONNRESET at any point aborts (NOTE(review): presumably into
 * ds_conn_reset(); the call site is missing from this extract).
 */
1003 static void ds_event(void *arg
, int event
)
1005 struct ds_info
*dp
= arg
;
1006 struct ldc_channel
*lp
= dp
->lp
;
1007 unsigned long flags
;
1010 spin_lock_irqsave(&ds_lock
, flags
);
1012 if (event
== LDC_EVENT_UP
) {
1014 spin_unlock_irqrestore(&ds_lock
, flags
);
1018 if (event
!= LDC_EVENT_DATA_READY
) {
1019 printk(KERN_WARNING PFX
"Unexpected LDC event %d\n", event
);
1020 spin_unlock_irqrestore(&ds_lock
, flags
);
1026 struct ds_msg_tag
*tag
;
/* Read the fixed-size message header first. */
1028 err
= ldc_read(lp
, dp
->rcv_buf
, sizeof(*tag
));
1030 if (unlikely(err
< 0)) {
1031 if (err
== -ECONNRESET
)
/* Then read tag->len payload bytes directly after it. */
1039 err
= ldc_read(lp
, tag
+ 1, tag
->len
);
1041 if (unlikely(err
< 0)) {
1042 if (err
== -ECONNRESET
)
/* Types below DS_DATA are handshake/registration control. */
1049 if (tag
->type
< DS_DATA
)
1050 err
= ds_handshake(dp
, dp
->rcv_buf
);
1052 err
= ds_data(dp
, dp
->rcv_buf
,
1053 sizeof(*tag
) + err
);
1054 if (err
== -ECONNRESET
)
1058 spin_unlock_irqrestore(&ds_lock
, flags
);
/* VIO probe: bring up the domain-services port described by @vdev.
 *
 * Allocates the ds_info connection state and a 4KB receive buffer,
 * builds an LDC stream-mode channel config from the device's irqs,
 * then allocates and binds the channel (events delivered to
 * ds_event()).  NOTE(review): error-return and registration lines are
 * missing from this extract.
 */
1061 static int __devinit
ds_probe(struct vio_dev
*vdev
,
1062 const struct vio_device_id
*id
)
/* Print the driver banner only on the first probe. */
1064 static int ds_version_printed
;
1065 struct ldc_channel_config ds_cfg
= {
1068 .mode
= LDC_MODE_STREAM
,
1070 struct ldc_channel
*lp
;
1074 if (ds_version_printed
++ == 0)
1075 printk(KERN_INFO
"%s", version
);
1077 dp
= kzalloc(sizeof(*dp
), GFP_KERNEL
);
/* Fixed 4KB receive buffer used by ds_event()'s ldc_read()s. */
1082 dp
->rcv_buf
= kzalloc(4096, GFP_KERNEL
);
1086 dp
->rcv_buf_len
= 4096;
1088 ds_cfg
.tx_irq
= vdev
->tx_irq
;
1089 ds_cfg
.rx_irq
= vdev
->rx_irq
;
1091 lp
= ldc_alloc(vdev
->channel_id
, &ds_cfg
, dp
);
1094 goto out_free_rcv_buf
;
1098 err
= ldc_bind(lp
, "DS");
/* VIO remove hook (body not visible in this extract). */
1121 static int ds_remove(struct vio_dev
*vdev
)
/* Match table: bind to the "domain-services-port" VIO device. */
1126 static struct vio_device_id ds_match
[] = {
1128 .type
= "domain-services-port",
1133 static struct vio_driver ds_driver
= {
1134 .id_table
= ds_match
,
1136 .remove
= ds_remove
,
1139 .owner
= THIS_MODULE
,
/* Module init: seed every capability handle with its table index in
 * the top 32 bits (so find_cap() works before registration assigns a
 * full handle), then register the VIO driver.  Runs at subsys initcall
 * time so domain services come up early.
 */
1143 static int __init
ds_init(void)
1147 for (i
= 0; i
< ARRAY_SIZE(ds_states
); i
++)
1148 ds_states
[i
].handle
= ((u64
)i
<< 32);
1150 return vio_register_driver(&ds_driver
);
1153 subsys_initcall(ds_init
);