/*
 * Serial Attached SCSI (SAS) Transport Layer initialization
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>

#include "sas_internal.h"

#include "../scsi_sas_internal.h"

static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;

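/**
 * sas_alloc_task - allocate a sas_task from the slab cache
 * @flags: gfp flags for the allocation
 *
 * Returns a zeroed task in the SAS_TASK_STATE_PENDING state with its
 * task_state_lock initialized, or NULL if the allocation fails.
 */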
struct sas_task *sas_alloc_task(gfp_t flags)
{
	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);

	if (task) {
		spin_lock_init(&task->task_state_lock);
		task->task_state_flags = SAS_TASK_STATE_PENDING;
	}

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_task);

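/**
 * sas_alloc_slow_task - allocate a sas_task with an attached sas_task_slow
 * @flags: gfp flags for both allocations
 *
 * The slow task adds a timer and a completion for callers that need to
 * wait on the task.  Returns NULL if either allocation fails.
 */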
struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
	struct sas_task *task = sas_alloc_task(flags);
	struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);

	if (!task || !slow) {
		if (task)
			kmem_cache_free(sas_task_cache, task);
		kfree(slow);
		return NULL;
	}

	task->slow_task = slow;
	slow->task = task;
	timer_setup(&slow->timer, NULL, 0);
	init_completion(&slow->completion);

	return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_slow_task);

void sas_free_task(struct sas_task *task)
{
	if (task) {
		kfree(task->slow_task);
		kmem_cache_free(sas_task_cache, task);
	}
}
EXPORT_SYMBOL_GPL(sas_free_task);

/*------------ SAS addr hash -----------*/
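/**
 * sas_hash_addr - hash an 8-byte SAS address down to 3 bytes
 * @hashed: buffer for the 3-byte hashed address
 * @sas_addr: the 8-byte SAS address to hash
 *
 * Runs a bitwise CRC with the polynomial 0x00DB2777 over the address
 * and stores the low 24 bits of the result, most significant byte first.
 */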
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
	const u32 poly = 0x00DB2777;
	u32 r = 0;
	int i;

	for (i = 0; i < 8; i++) {
		int b;
		for (b = 7; b >= 0; b--) {
			r <<= 1;
			if ((1 << b) & sas_addr[i]) {
				if (!(r & 0x01000000))
					r ^= poly;
			} else if (r & 0x01000000)
				r ^= poly;
		}
	}

	hashed[0] = (r >> 16) & 0xFF;
	hashed[1] = (r >> 8) & 0xFF;
	hashed[2] = r & 0xFF;
}

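/**
 * sas_register_ha - register a SAS host adapter with libsas
 * @sas_ha: the host adapter to register
 *
 * Initializes the ha-wide locks, lists and the phy event threshold,
 * then registers the phys and ports and sets up event handling.
 * Returns 0 on success or the error from the step that failed.
 */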
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
	int error = 0;

	mutex_init(&sas_ha->disco_mutex);
	spin_lock_init(&sas_ha->phy_port_lock);
	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

	set_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_lock_init(&sas_ha->lock);
	mutex_init(&sas_ha->drain_mutex);
	init_waitqueue_head(&sas_ha->eh_wait_q);
	INIT_LIST_HEAD(&sas_ha->defer_q);
	INIT_LIST_HEAD(&sas_ha->eh_dev_q);

	sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;

	error = sas_register_phys(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
		return error;
	}

	error = sas_register_ports(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
		goto Undo_phys;
	}

	error = sas_init_events(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
		goto Undo_ports;
	}

	INIT_LIST_HEAD(&sas_ha->eh_done_q);
	INIT_LIST_HEAD(&sas_ha->eh_ata_q);

	return 0;

Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:

	return error;
}

static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
	/* Set the state to unregistered so that no further unchained
	 * events are queued, and flush any in-progress drainers.
	 */
	mutex_lock(&sas_ha->drain_mutex);
	spin_lock_irq(&sas_ha->lock);
	clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_unlock_irq(&sas_ha->lock);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);
}

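/**
 * sas_unregister_ha - unregister a SAS host adapter from libsas
 * @sas_ha: the host adapter to unregister
 *
 * Disables event processing, unregisters the ports and then drains any
 * unregistration work still queued on the libsas workqueue.
 */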
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
	sas_disable_events(sas_ha);
	sas_unregister_ports(sas_ha);

	/* flush unregistration work */
	mutex_lock(&sas_ha->drain_mutex);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);

	return 0;
}

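/*
 * Fetch phy error counters: ask the LLDD directly for a local phy,
 * otherwise query the expander via SMP.
 */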
static int sas_get_linkerrors(struct sas_phy *phy)
{
	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
	}

	return sas_smp_get_phy_events(phy);
}

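/*
 * Route a user-requested link reset through libata if the phy is
 * attached to a SATA device; returns -ENODEV if it is not.
 */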
int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
	struct domain_device *dev = NULL;

	/* try to route user requested link resets through libata */
	if (asd_phy->port)
		dev = asd_phy->port->port_dev;

	/* validate that dev has been probed */
	if (dev)
		dev = sas_find_dev_by_rphy(dev->rphy);

	if (dev && dev_is_sata(dev)) {
		sas_ata_schedule_reset(dev);
		sas_ata_wait_eh(dev);
		return 0;
	}

	return -ENODEV;
}

/**
 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
 * @phy: the phy to reset
 * @hard_reset: non-zero to request a hard reset rather than a link reset
 *
 * The phy reset request arrives via sysfs and is handled in host
 * workqueue context, so we know we can block on eh and safely traverse
 * the domain_device topology.
 */
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	enum phy_func reset_type;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
			return 0;
		return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);

		if (ata_dev && !hard_reset) {
			sas_ata_schedule_reset(ata_dev);
			sas_ata_wait_eh(ata_dev);
			return 0;
		} else
			return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
}

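/*
 * Enable (via a link reset) or disable a phy, either through the LLDD
 * for a local phy or through SMP phy control for an expander phy.
 */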
static int sas_phy_enable(struct sas_phy *phy, int enable)
{
	int ret;
	enum phy_func cmd;

	if (enable)
		cmd = PHY_FUNC_LINK_RESET;
	else
		cmd = PHY_FUNC_DISABLE;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		if (enable)
			ret = transport_sas_phy_reset(phy, 0);
		else
			ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
	}
	return ret;
}

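/*
 * Perform a hard or link reset on an enabled phy, dispatching to the
 * LLDD for local phys and to SMP phy control for expander phys.
 */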
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	int ret;
	enum phy_func reset_type;

	if (!phy->enabled)
		return -ENODEV;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
	return ret;
}

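/*
 * Validate and clamp the requested link rates against the phy's
 * hardware limits, then program them via the LLDD or SMP phy control.
 */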
int sas_set_phy_speed(struct sas_phy *phy,
		      struct sas_phy_linkrates *rates)
{
	int ret;

	if ((rates->minimum_linkrate &&
	     rates->minimum_linkrate > phy->maximum_linkrate) ||
	    (rates->maximum_linkrate &&
	     rates->maximum_linkrate < phy->minimum_linkrate))
		return -EINVAL;

	if (rates->minimum_linkrate &&
	    rates->minimum_linkrate < phy->minimum_linkrate_hw)
		rates->minimum_linkrate = phy->minimum_linkrate_hw;

	if (rates->maximum_linkrate &&
	    rates->maximum_linkrate > phy->maximum_linkrate_hw)
		rates->maximum_linkrate = phy->maximum_linkrate_hw;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
					       rates);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		ret = sas_smp_phy_control(ddev, phy->number,
					  PHY_FUNC_LINK_RESET, rates);
	}

	return ret;
}

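/*
 * Prepare the ha for resume: mark it registered again and clear any
 * stale attached-address/frame data left over from suspend.
 */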
void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
	int i;

	set_bit(SAS_HA_REGISTERED, &ha->state);

	/* clear out any stale link events/data from the suspension path */
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
		phy->frame_rcvd_size = 0;
	}
}
EXPORT_SYMBOL(sas_prep_resume_ha);

static int phys_suspended(struct sas_ha_struct *ha)
{
	int i, rc = 0;

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended)
			rc++;
	}

	return rc;
}

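/*
 * Wait up to 25 seconds for suspended phys to come back, post a
 * PHYE_RESUME_TIMEOUT event for any that did not, then unblock I/O and
 * drain the resulting libsas work.
 */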
void sas_resume_ha(struct sas_ha_struct *ha)
{
	const unsigned long tmo = msecs_to_jiffies(25000);
	int i;

	/* Deform ports on phys that did not resume.
	 * At this point we may be racing the phy coming back (as posted
	 * by the lldd).  So we post the event and once we are in the
	 * libsas context check that the phy remains suspended before
	 * tearing it down.
	 */
	i = phys_suspended(ha);
	if (i)
		dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
			 i, i > 1 ? "s" : "");
	wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended) {
			dev_warn(&phy->phy->dev, "resume timeout\n");
			sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
		}
	}

	/* all phys are back up or timed out, turn on i/o so we can
	 * flush out disks that did not return
	 */
	scsi_unblock_requests(ha->core.shost);
	sas_drain_work(ha);
}
EXPORT_SYMBOL(sas_resume_ha);

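/*
 * Quiesce the ha for suspend: stop event processing, block I/O and send
 * DISCE_SUSPEND to every port, then drain the suspend work.
 */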
void sas_suspend_ha(struct sas_ha_struct *ha)
{
	int i;

	sas_disable_events(ha);
	scsi_block_requests(ha->core.shost);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];

		sas_discover_event(port, DISCE_SUSPEND);
	}

	/* flush suspend events while unregistered */
	mutex_lock(&ha->drain_mutex);
	__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);

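/* Free the per-phy sas_phy_data allocated by sas_phy_setup. */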
static void sas_phy_release(struct sas_phy *phy)
{
	kfree(phy->hostdata);
	phy->hostdata = NULL;
}

static void phy_reset_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);

	d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}

static void phy_enable_work(struct work_struct *work)
{
	struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);

	d->enable_result = sas_phy_enable(d->phy, d->enable);
}

static int sas_phy_setup(struct sas_phy *phy)
{
	struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	mutex_init(&d->event_lock);
	INIT_SAS_WORK(&d->reset_work, phy_reset_work);
	INIT_SAS_WORK(&d->enable_work, phy_enable_work);
	d->phy = phy;
	phy->hostdata = d;

	return 0;
}

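/*
 * Queue a phy reset on the libsas workqueue and wait for it to be
 * processed, so that the reset is coordinated with ata-eh and
 * discovery; the result of the reset is returned to the caller.
 */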
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->reset_result = 0;
	d->hard_reset = hard_reset;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->reset_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->reset_result;
	mutex_unlock(&d->event_lock);

	return rc;
}

static int queue_phy_enable(struct sas_phy *phy, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->enable_result = 0;
	d->enable = enable;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->enable_work);
	spin_unlock_irq(&ha->lock);

	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->enable_result;
	mutex_unlock(&d->event_lock);

	return rc;
}

static struct sas_function_template sft = {
	.phy_enable = queue_phy_enable,
	.phy_reset = queue_phy_reset,
	.phy_setup = sas_phy_setup,
	.phy_release = sas_phy_release,
	.set_phy_speed = sas_set_phy_speed,
	.get_linkerrors = sas_get_linkerrors,
	.smp_handler = sas_smp_handler,
};

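/*
 * The phy_event_threshold host attribute exposes sha->event_thres via
 * sysfs; writes below 32 are rounded up to 32.
 */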
static inline ssize_t phy_event_threshold_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
}

static inline ssize_t phy_event_threshold_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	sha->event_thres = simple_strtol(buf, NULL, 10);

	/* threshold cannot be set too small */
	if (sha->event_thres < 32)
		sha->event_thres = 32;

	return count;
}

DEVICE_ATTR(phy_event_threshold,
	S_IRUGO|S_IWUSR,
	phy_event_threshold_show,
	phy_event_threshold_store);
EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);

struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
	struct scsi_transport_template *stt = sas_attach_transport(&sft);
	struct sas_internal *i;

	if (!stt)
		return stt;

	i = to_sas_internal(stt);
	i->dft = dft;
	stt->create_work_queue = 1;
	stt->eh_strategy_handler = sas_scsi_recover_host;

	return stt;
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);

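/*
 * Allocate a phy event and account it against the per-phy event count.
 * If the count exceeds the ha's event_thres, the phy is shut down via
 * PHYE_SHUTDOWN when the LLDD supports phy control; otherwise the event
 * is dropped and NULL is returned.
 */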
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
{
	struct asd_sas_event *event;
	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	event = kmem_cache_zalloc(sas_event_cache, flags);
	if (!event)
		return NULL;

	atomic_inc(&phy->event_nr);

	if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
		if (i->dft->lldd_control_phy) {
			if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
				sas_printk("phy%02d is bursting events, shutting it down\n",
					   phy->id);
				sas_notify_phy_event(phy, PHYE_SHUTDOWN);
			}
		} else {
			/* PHY control is not supported; stop allocating events */
			WARN_ONCE(1, "PHY control not supported.\n");
			kmem_cache_free(sas_event_cache, event);
			atomic_dec(&phy->event_nr);
			event = NULL;
		}
	}

	return event;
}

void sas_free_event(struct asd_sas_event *event)
{
	struct asd_sas_phy *phy = event->phy;

	kmem_cache_free(sas_event_cache, event);
	atomic_dec(&phy->event_nr);
}

/* ---------- SAS Class register/unregister ---------- */

static int __init sas_class_init(void)
{
	sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
	if (!sas_task_cache)
		goto out;

	sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
	if (!sas_event_cache)
		goto free_task_kmem;

	return 0;

free_task_kmem:
	kmem_cache_destroy(sas_task_cache);
out:
	return -ENOMEM;
}

static void __exit sas_class_exit(void)
{
	kmem_cache_destroy(sas_task_cache);
	kmem_cache_destroy(sas_event_cache);
}

MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");

module_init(sas_class_init);
module_exit(sas_class_exit);

EXPORT_SYMBOL_GPL(sas_register_ha);
EXPORT_SYMBOL_GPL(sas_unregister_ha);