// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HP iLO management processor.
 *
 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
 *	David Altobelli <david.altobelli@hpe.com>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "hpilo.h"

static struct class *ilo_class;
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
static const struct pci_device_id ilo_blacklist[] = {
	/* auxiliary iLO */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
	/* CL */
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
	{}
};

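/*
 * Queue entries pack a descriptor id and a length in 8-byte qwords,
 * together with the occupied/consumed state bits defined in hpilo.h.
 * For illustration: mk_entry(id, 20) rounds the byte length up to
 * (20 >> 3) + 1 = 3 qwords, and get_entry_len() on that entry returns
 * 3 << 3 = 24 bytes.
 */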
static inline int get_entry_id(int entry)
{
	return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}

static inline int get_entry_len(int entry)
{
	return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}

static inline int mk_entry(int id, int len)
{
	int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
	return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}

static inline int desc_mem_sz(int nr_entry)
{
	return nr_entry << L2_QENTRY_SZ;
}

/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
	      & ENTRY_MASK_O)) {
		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
				(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
		fifo_q->tail += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C) {
		if (entry)
			*entry = c & ENTRY_MASK_NOSTATE;

		fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
							(c | ENTRY_MASK) + 1;
		fifo_q->head += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C)
		ret = 1;
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

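/*
 * Packet helpers: each channel has a send and a receive queue.  A packet
 * is named by its descriptor id, and its buffer lives in the descriptor
 * memory at offset desc_mem_sz(id), i.e. one fixed-size slot of
 * 1 << L2_QENTRY_SZ bytes per id, which is how ilo_pkt_dequeue() turns a
 * dequeued entry back into a buffer address.
 */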
static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int id, int len)
{
	char *fifobar;
	int entry;

	if (dir == SENDQ)
		fifobar = ccb->ccb_u1.send_fifobar;
	else
		fifobar = ccb->ccb_u3.recv_fifobar;

	entry = mk_entry(id, len);
	return fifo_enqueue(hw, fifobar, entry);
}

static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int *id, int *len, void **pkt)
{
	char *fifobar, *desc;
	int entry = 0, pkt_id = 0;
	int ret;

	if (dir == SENDQ) {
		fifobar = ccb->ccb_u1.send_fifobar;
		desc = ccb->ccb_u2.send_desc;
	} else {
		fifobar = ccb->ccb_u3.recv_fifobar;
		desc = ccb->ccb_u4.recv_desc;
	}

	ret = fifo_dequeue(hw, fifobar, &entry);
	if (ret) {
		pkt_id = get_entry_id(entry);
		if (id)
			*id = pkt_id;
		if (len)
			*len = get_entry_len(entry);
		if (pkt)
			*pkt = (void *)(desc + desc_mem_sz(pkt_id));
	}

	return ret;
}

static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
{
	char *fifobar = ccb->ccb_u3.recv_fifobar;

	return fifo_check_recv(hw, fifobar);
}

static inline void doorbell_set(struct ccb *ccb)
{
	iowrite8(1, ccb->ccb_u5.db_base);
}

static inline void doorbell_clr(struct ccb *ccb)
{
	iowrite8(2, ccb->ccb_u5.db_base);
}

static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
	int active = 0, go = 1;
	return l2sz << CTRL_BITPOS_L2SZ |
	       idxmask << CTRL_BITPOS_FIFOINDEXMASK |
	       desclim << CTRL_BITPOS_DESCLIMIT |
	       active << CTRL_BITPOS_A |
	       go << CTRL_BITPOS_G;
}

static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
	/* for simplicity, use the same parameters for send and recv ctrls */
	ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
	ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}

static inline int fifo_sz(int nr_entry)
{
	/* size of a fifo is determined by the number of entries it contains */
	return nr_entry * sizeof(u64) + FIFOHANDLESIZE;
}

static void fifo_setup(void *base_addr, int nr_entry)
{
	struct fifo *fifo_q = base_addr;
	int i;

	/* set up an empty fifo */
	fifo_q->head = 0;
	fifo_q->tail = 0;
	fifo_q->reset = 0;
	fifo_q->nrents = nr_entry;
	fifo_q->imask = nr_entry - 1;
	fifo_q->merge = ENTRY_MASK_O;

	for (i = 0; i < nr_entry; i++)
		fifo_q->fifobar[i] = 0;
}

static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ccb __iomem *device_ccb = data->mapped_ccb;
	int retries;

	/* complicated dance to tell the hw we are stopping */
	doorbell_clr(driver_ccb);
	iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->send_ctrl);
	iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->recv_ctrl);

	/* give iLO some time to process stop request */
	for (retries = MAX_WAIT; retries > 0; retries--) {
		doorbell_set(driver_ccb);
		udelay(WAIT_TIME);
		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
		    &&
		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
			break;
	}
	if (retries == 0)
		dev_err(&pdev->dev, "Closing, but controller still active\n");

	/* clear the hw ccb */
	memset_io(device_ccb, 0, sizeof(struct ccb));

	/* free resources used to back send/recv queues */
	dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va,
			  data->dma_pa);
}

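/*
 * One coherent DMA block backs each channel.  As carved up below it holds,
 * in order: the send fifo (handle plus ring), the receive fifo (aligned to
 * ILO_CACHE_SZ), then the send and receive descriptor memory; the extra
 * ILO_START_ALIGN + ILO_CACHE_SZ in dma_size covers the alignment
 * round-ups.
 */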
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	char *dma_va;
	dma_addr_t dma_pa;
	struct ccb *driver_ccb, *ilo_ccb;

	driver_ccb = &data->driver_ccb;
	ilo_ccb = &data->ilo_ccb;

	data->dma_size = 2 * fifo_sz(NR_QENTRY) +
			 2 * desc_mem_sz(NR_QENTRY) +
			 ILO_START_ALIGN + ILO_CACHE_SZ;

	data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size,
					  &data->dma_pa, GFP_ATOMIC);
	if (!data->dma_va)
		return -ENOMEM;

	dma_va = (char *)data->dma_va;
	dma_pa = data->dma_pa;

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
	dma_pa = roundup(dma_pa, ILO_START_ALIGN);

	/*
	 * Create two ccb's, one with virt addrs, one with phys addrs.
	 * Copy the phys addr ccb to device shared mem.
	 */
	ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
	ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
	dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	driver_ccb->ccb_u2.send_desc = dma_va;
	ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
	dma_pa += desc_mem_sz(NR_QENTRY);
	dma_va += desc_mem_sz(NR_QENTRY);

	driver_ccb->ccb_u4.recv_desc = dma_va;
	ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

	driver_ccb->channel = slot;
	ilo_ccb->channel = slot;

	driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
	ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

	return 0;
}

static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	int pkt_id, pkt_sz;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* copy the ccb with physical addrs to device memory */
	data->mapped_ccb = (struct ccb __iomem *)
				(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
	memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

	/* put packets on the send and receive queues */
	pkt_sz = 0;
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
		doorbell_set(driver_ccb);
	}

	pkt_sz = desc_mem_sz(1);
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
		ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

	/* the ccb is ready to use */
	doorbell_clr(driver_ccb);
}

static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
	int pkt_id, i;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* make sure iLO is really handling requests */
	for (i = MAX_WAIT; i > 0; i--) {
		if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
			break;
		udelay(WAIT_TIME);
	}

	if (i == 0) {
		dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
		return -EBUSY;
	}

	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
	doorbell_set(driver_ccb);
	return 0;
}

static inline int is_channel_reset(struct ccb *ccb)
{
	/* check for this particular channel needing a reset */
	return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

static inline void set_channel_reset(struct ccb *ccb)
{
	/* set a flag indicating this channel needs a reset */
	FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
	return ioread32(&hw->mmio_vaddr[DB_OUT]);
}

static inline int is_db_reset(int db_out)
{
	return db_out & (1 << DB_RESET);
}

static inline int is_device_reset(struct ilo_hwinfo *hw)
{
	/* check for global reset condition */
	return is_db_reset(get_device_outbound(hw));
}

static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
	iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}

static inline void clear_device(struct ilo_hwinfo *hw)
{
	/* clear the device (reset bits, pending channel entries) */
	clear_pending_db(hw, -1);
}

static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}

static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
		 &hw->mmio_vaddr[DB_IRQ]);
}

static void ilo_set_reset(struct ilo_hwinfo *hw)
{
	int slot;

	/*
	 * Mapped memory is zeroed on ilo reset, so set a per ccb flag
	 * to indicate that this ccb needs to be closed and reopened.
	 */
	for (slot = 0; slot < max_ccb; slot++) {
		if (!hw->ccb_alloc[slot])
			continue;
		set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
	}
}

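/*
 * Character device interface: each minor number is one iLO channel.
 * Userspace writes a command packet, polls for completion, and reads the
 * response.  Once the management processor resets, every open channel is
 * flagged and these paths return -ENODEV until the application closes and
 * reopens the device.
 */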
static ssize_t ilo_read(struct file *fp, char __user *buf,
			size_t len, loff_t *off)
{
	int err, found, cnt, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb)) {
		/*
		 * If the device has been reset, applications
		 * need to close and reopen all ccbs.
		 */
		return -ENODEV;
	}

	/*
	 * This function is to be called when data is expected
	 * in the channel, and will return an error if no packet is found
	 * during the loop below.  The sleep/retry logic is to allow
	 * applications to call read() immediately post write(),
	 * and give iLO some time to process the sent packet.
	 */
	cnt = 20;
	do {
		/* look for a received packet */
		found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
					&pkt_len, &pkt);
		if (found)
			break;
		cnt--;
		msleep(100);
	} while (!found && cnt);

	if (!found)
		return -EAGAIN;

	/* only copy the length of the received packet */
	if (pkt_len < len)
		len = pkt_len;

	err = copy_to_user(buf, pkt, len);

	/* return the received packet to the queue */
	ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

	return err ? -EFAULT : len;
}

static ssize_t ilo_write(struct file *fp, const char __user *buf,
			 size_t len, loff_t *off)
{
	int err, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb))
		return -ENODEV;

	/* get a packet to send the user command */
	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
		return -EBUSY;

	/* limit the length to the length of the packet */
	if (pkt_len < len)
		len = pkt_len;

	/* on failure, set the len to 0 to return empty packet to the device */
	err = copy_from_user(pkt, buf, len);
	if (err)
		len = 0;

	/* send the packet */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
	doorbell_set(driver_ccb);

	return err ? -EFAULT : len;
}

static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;

	poll_wait(fp, &data->ccb_waitq, wait);

	if (is_channel_reset(driver_ccb))
		return EPOLLERR;
	else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static int ilo_close(struct inode *ip, struct file *fp)
{
	int slot;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	spin_lock(&hw->open_lock);

	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

		data = fp->private_data;

		spin_lock_irqsave(&hw->alloc_lock, flags);
		hw->ccb_alloc[slot] = NULL;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		ilo_ccb_close(hw->ilo_dev, data);

		kfree(data);
	} else
		hw->ccb_alloc[slot]->ccb_cnt--;

	spin_unlock(&hw->open_lock);

	return 0;
}

static int ilo_open(struct inode *ip, struct file *fp)
{
	int slot, error;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	/* new ccb allocation */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock(&hw->open_lock);

	/* each fd private_data holds sw/hw view of ccb */
	if (hw->ccb_alloc[slot] == NULL) {
		/* create a channel control block for this minor */
		error = ilo_ccb_setup(hw, data, slot);
		if (error) {
			kfree(data);
			goto out;
		}

		data->ccb_cnt = 1;
		data->ccb_excl = fp->f_flags & O_EXCL;
		data->ilo_hw = hw;
		init_waitqueue_head(&data->ccb_waitq);

		/* write the ccb to hw */
		spin_lock_irqsave(&hw->alloc_lock, flags);
		ilo_ccb_open(hw, data, slot);
		hw->ccb_alloc[slot] = data;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		/* make sure the channel is functional */
		error = ilo_ccb_verify(hw, data);
		if (error) {

			spin_lock_irqsave(&hw->alloc_lock, flags);
			hw->ccb_alloc[slot] = NULL;
			spin_unlock_irqrestore(&hw->alloc_lock, flags);

			ilo_ccb_close(hw->ilo_dev, data);

			kfree(data);
			goto out;
		}

	} else {
		kfree(data);
		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
			/*
			 * The channel exists, and either this open
			 * or a previous open of this channel wants
			 * exclusive access.
			 */
			error = -EBUSY;
		} else {
			hw->ccb_alloc[slot]->ccb_cnt++;
			error = 0;
		}
	}
out:
	spin_unlock(&hw->open_lock);

	if (!error)
		fp->private_data = hw->ccb_alloc[slot];

	return error;
}

static const struct file_operations ilo_fops = {
	.owner		= THIS_MODULE,
	.read		= ilo_read,
	.write		= ilo_write,
	.poll		= ilo_poll,
	.open		= ilo_open,
	.release	= ilo_close,
	.llseek		= noop_llseek,
};

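/*
 * The outbound doorbell register is read as a bitmask: one bit per channel
 * with completed work, plus a reset indication.  The handler wakes the
 * waitqueue of every flagged channel and acknowledges only the bits it
 * observed.
 */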
static irqreturn_t ilo_isr(int irq, void *data)
{
	struct ilo_hwinfo *hw = data;
	int pending, i;

	spin_lock(&hw->alloc_lock);

	/* check for ccbs which have data */
	pending = get_device_outbound(hw);
	if (!pending) {
		spin_unlock(&hw->alloc_lock);
		return IRQ_NONE;
	}

	if (is_db_reset(pending)) {
		/* wake up all ccbs if the device was reset */
		pending = -1;
		ilo_set_reset(hw);
	}

	for (i = 0; i < max_ccb; i++) {
		if (!hw->ccb_alloc[i])
			continue;
		if (pending & (1 << i))
			wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
	}

	/* clear the device of the channels that have been handled */
	clear_pending_db(hw, pending);

	spin_unlock(&hw->alloc_lock);

	return IRQ_HANDLED;
}

static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	pci_iounmap(pdev, hw->db_vaddr);
	pci_iounmap(pdev, hw->ram_vaddr);
	pci_iounmap(pdev, hw->mmio_vaddr);
}

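/*
 * BAR usage, as mapped below: BAR 1 holds the doorbell/IRQ registers,
 * BAR 2 (or the last 8k of BAR 5 on subsystem device 0x00E4) holds the
 * shared CCB memory, and BAR 3 holds the per-channel doorbell aperture.
 */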
static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	int bar;
	unsigned long off;

	/* map the memory mapped i/o registers */
	hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
	if (hw->mmio_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio\n");
		goto out;
	}

	/* map the adapter shared memory region */
	if (pdev->subsystem_device == 0x00E4) {
		bar = 5;
		/* Last 8k is reserved for CCBs */
		off = pci_resource_len(pdev, bar) - 0x2000;
	} else {
		bar = 2;
		off = 0;
	}
	hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
	if (hw->ram_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping shared mem\n");
		goto mmio_free;
	}

	/* map the doorbell aperture */
	hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE);
	if (hw->db_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping doorbell\n");
		goto ram_free;
	}

	return 0;
ram_free:
	pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
	pci_iounmap(pdev, hw->mmio_vaddr);
out:
	return -ENOMEM;
}

static void ilo_remove(struct pci_dev *pdev)
{
	int i, minor;
	struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

	if (!ilo_hw)
		return;

	clear_device(ilo_hw);

	minor = MINOR(ilo_hw->cdev.dev);
	for (i = minor; i < minor + max_ccb; i++)
		device_destroy(ilo_class, MKDEV(ilo_major, i));

	cdev_del(&ilo_hw->cdev);
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
	ilo_unmap_device(pdev, ilo_hw);
	pci_release_regions(pdev);
	/*
	 * pci_disable_device(pdev) used to be here. But this PCI device has
	 * two functions with interrupt lines connected to a single pin. The
	 * other one is a USB host controller. So when we disable the PIN here
	 * e.g. by rmmod hpilo, the controller stops working. It is because
	 * the interrupt link is disabled in ACPI since it is not refcounted
	 * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
	 */
	kfree(ilo_hw);
	ilo_hwdev[(minor / max_ccb)] = 0;
}

static int ilo_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	int devnum, minor, start, error = 0;
	struct ilo_hwinfo *ilo_hw;

	if (pci_match_id(ilo_blacklist, pdev)) {
		dev_dbg(&pdev->dev, "Not supported on this device\n");
		return -ENODEV;
	}

	if (max_ccb > MAX_CCB)
		max_ccb = MAX_CCB;
	else if (max_ccb < MIN_CCB)
		max_ccb = MIN_CCB;

	/* find a free range for device files */
	for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
		if (ilo_hwdev[devnum] == 0) {
			ilo_hwdev[devnum] = 1;
			break;
		}
	}

	if (devnum == MAX_ILO_DEV) {
		dev_err(&pdev->dev, "Error finding free device\n");
		return -ENODEV;
	}

	/* track global allocations for this device */
	error = -ENOMEM;
	ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
	if (!ilo_hw)
		goto out;

	ilo_hw->ilo_dev = pdev;
	spin_lock_init(&ilo_hw->alloc_lock);
	spin_lock_init(&ilo_hw->fifo_lock);
	spin_lock_init(&ilo_hw->open_lock);

	error = pci_enable_device(pdev);
	if (error)
		goto free;

	pci_set_master(pdev);

	error = pci_request_regions(pdev, ILO_NAME);
	if (error)
		goto disable;

	error = ilo_map_device(pdev, ilo_hw);
	if (error)
		goto free_regions;

	pci_set_drvdata(pdev, ilo_hw);
	clear_device(ilo_hw);

	error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
	if (error)
		goto unmap;

	ilo_enable_interrupts(ilo_hw);

	cdev_init(&ilo_hw->cdev, &ilo_fops);
	ilo_hw->cdev.owner = THIS_MODULE;
	start = devnum * max_ccb;
	error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb);
	if (error) {
		dev_err(&pdev->dev, "Could not add cdev\n");
		goto remove_isr;
	}

	for (minor = 0 ; minor < max_ccb; minor++) {
		struct device *dev;
		dev = device_create(ilo_class, &pdev->dev,
				    MKDEV(ilo_major, minor), NULL,
				    "hpilo!d%dccb%d", devnum, minor);
		if (IS_ERR(dev))
			dev_err(&pdev->dev, "Could not create files\n");
	}

	return 0;
remove_isr:
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
unmap:
	ilo_unmap_device(pdev, ilo_hw);
free_regions:
	pci_release_regions(pdev);
disable:
	/* pci_disable_device(pdev);  see comment in ilo_remove */
free:
	kfree(ilo_hw);
out:
	ilo_hwdev[devnum] = 0;
	return error;
}

static const struct pci_device_id ilo_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);

static struct pci_driver ilo_driver = {
	.name	  = ILO_NAME,
	.id_table = ilo_devices,
	.probe	  = ilo_probe,
	.remove	  = ilo_remove,
};

static int __init ilo_init(void)
{
	int error;
	dev_t dev;

	ilo_class = class_create(THIS_MODULE, "iLO");
	if (IS_ERR(ilo_class)) {
		error = PTR_ERR(ilo_class);
		goto out;
	}

	error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
	if (error)
		goto class_destroy;

	ilo_major = MAJOR(dev);

	error = pci_register_driver(&ilo_driver);
	if (error)
		goto chr_remove;

	return 0;
chr_remove:
	unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
	class_destroy(ilo_class);
out:
	return error;
}

static void __exit ilo_exit(void)
{
	pci_unregister_driver(&ilo_driver);
	unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
	class_destroy(ilo_class);
}

MODULE_VERSION("1.5.0");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hpe.com>");
MODULE_LICENSE("GPL v2");

module_param(max_ccb, uint, 0444);
MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)");

module_init(ilo_init);
module_exit(ilo_exit);