target: target_core_configfs.h is not needed in fabric drivers
drivers/target/sbp/sbp_target.c
/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

static const struct target_core_fabric_ops sbp_ops;

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

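/*
 * Read the peer's EUI-64 out of its configuration ROM: the GUID occupies
 * quadlets 3 and 4 of the bus information block (offsets 0x0c and 0x10
 * from CSR_CONFIG_ROM), fetched here as two quadlet reads.
 */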
static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, struct se_lun *lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->lun == lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		struct se_lun *lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->lun != lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	if (lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return ERR_PTR(-EINVAL);

	spin_lock(&se_tpg->tpg_lun_lock);
	se_lun = se_tpg->tpg_lun_list[lun];

	if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
		se_lun = ERR_PTR(-ENODEV);

	spin_unlock(&se_tpg->tpg_lun_lock);

	return se_lun;
}

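/*
 * Create a new session for an initiator identified by its GUID: allocate
 * the TCM session, look up an explicit node ACL keyed on the 16-hex-digit
 * GUID string, and register the session with the core. The caller arms
 * the delayed session maintenance work afterwards.
 */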
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}

static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);

static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct se_lun *se_lun;
	int ret;
	u64 guid;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	int login_response_len;

	se_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));
	if (IS_ERR(se_lun)) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		se_lun->unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, se_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, se_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

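	/*
	 * The ORB's reconnect field requests a hold time of 2^n seconds;
	 * we cap that at the configurable max_reconnect_timeout and store
	 * it minus one, which session_check_for_reset() adds back when it
	 * arms reconnect_expires.
	 */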
	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->lun = se_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

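	/*
	 * Honour the response length the initiator asked for, up to the
	 * full block; the 12-byte floor guarantees the login_ID and the
	 * command block agent address are always returned, even if the
	 * initiator requested less.
	 */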
	login_response_len = clamp_val(
		LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
		12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->lun->unpacked_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

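/*
 * Per-login command block agent. The 0x20-byte register block decoded in
 * tgt_agent_rw() below follows the SBP-2 layout:
 *
 *   0x00  AGENT_STATE                 (quadlet, read; writes ignored)
 *   0x04  AGENT_RESET                 (quadlet, write)
 *   0x08  ORB_POINTER                 (octlet, read/write)
 *   0x10  DOORBELL                    (quadlet, write)
 *   0x14  UNSOLICITED_STATUS_ENABLE   (quadlet, write; ignored)
 */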
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

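/*
 * Fetch engine for the command block agent: walk the linked list of ORBs
 * starting at the written ORB_POINTER, reading each ORB over the bus and
 * handing it to tgt_agent_process_work(). After a DOORBELL the current
 * ORB has already been processed, so the first fetch only serves to pick
 * up a freshly appended next_ORB pointer.
 */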
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an increasing (quadratic) backoff.
 */
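/*
 * With delay = 5 * attempt * attempt, the failed attempts sleep for
 * roughly 5, 20, 45, 80 and 125 microseconds (up to twice that, since
 * usleep_range() is given [delay, delay * 2]).
 */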
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

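/*
 * Indirect data descriptors: when the page table present bit is set, the
 * ORB's data_descriptor points at an array of 8-byte page table entries
 * (a 16-bit segment length plus a 48-bit segment base address), and the
 * ORB's data_size field counts entries rather than bytes.
 */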
static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

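/*
 * Derive transfer length and DMA direction from the ORB: with a page
 * table the total length is the sum of the segment lengths, otherwise
 * data_size is already a byte count. The direction bit is from the
 * initiator's point of view, so a set bit means the target writes to
 * the initiator (a SCSI READ).
 */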
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->lun->unpacked_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

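/*
 * The status block's len field encodes its total size in quadlets minus
 * one, so the byte count written back is (len + 1) * 4: 8 bytes for a
 * bare STATUS_BLOCK_LEN(1) block, 24 bytes when sense data is attached
 * with STATUS_BLOCK_LEN(5).
 */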
static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}

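/*
 * Repack a fixed-format SCSI sense buffer into the condensed SBP-2
 * status data layout: the SCSI status, sense key, ASC and ASCQ plus the
 * information, CDB-dependent, FRU and sense-key-dependent bytes are
 * squeezed into 16 bytes of status data following the block header.
 */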
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;	/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}

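/*
 * Management agent work item: fetch the management ORB the initiator
 * pointed us at, dispatch on its function (LOGIN, QUERY LOGINS,
 * RECONNECT, LOGOUT and the unimplemented task-management functions),
 * then write the status block back to the initiator's status FIFO.
 */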
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	int i, count = 0;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tpg->tpg_lun_list[i];

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		count++;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return count;
}

static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 * MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i];
		struct se_device *dev;
		int type;

		if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE)
			continue;

		spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

		dev = se_lun->lun_se_dev;
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(se_lun->unpacked_lun & 0xffff);

		spin_lock(&tport->tpg->se_tpg.tpg_lun_lock);
	}
	spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock);

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}

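/*
 * Parse a WWN given as exactly 16 hex digits (an EUI-64, e.g.
 * "0001020304050607"), optionally terminated by a newline. On success
 * the number of characters consumed is returned; any malformed input
 * yields -1.
 */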
343d475d 1950static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
a511ce33
CB
1951{
1952 const char *cp;
1953 char c, nibble;
1954 int pos = 0, err;
1955
1956 *wwn = 0;
1957 for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1958 c = *cp;
1959 if (c == '\n' && cp[1] == '\0')
1960 continue;
1961 if (c == '\0') {
1962 err = 2;
1963 if (pos != 16)
1964 goto fail;
1965 return cp - name;
1966 }
1967 err = 3;
1968 if (isdigit(c))
1969 nibble = c - '0';
343d475d 1970 else if (isxdigit(c))
1971 nibble = tolower(c) - 'a' + 10;
1972 else
1973 goto fail;
1974 *wwn = (*wwn << 4) | nibble;
1975 pos++;
1976 }
1977 err = 4;
1978fail:
1979	pr_info("err %u len %zu pos %u\n",
1980			err, cp - name, pos);
1981 return -1;
1982}
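/*
 * Usage sketch (example value illustrative): sbp_parse_wwn() accepts
 * exactly 16 hex digits plus an optional trailing newline, as produced
 * by "echo" into configfs, so "0123456789abcdef\n" yields
 * *wwn == 0x0123456789abcdef; anything shorter, longer, or containing
 * a non-hex character fails and returns -1.
 */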
1983
1984static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1985{
1986 return snprintf(buf, len, "%016llx", wwn);
1987}
1988
c7d6a803 1989static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
a511ce33 1990{
a511ce33 1991 u64 guid = 0;
a511ce33 1992
343d475d 1993 if (sbp_parse_wwn(name, &guid) < 0)
1994 return -EINVAL;
1995 return 0;
1996}
1997
1998static int sbp_post_link_lun(
1999 struct se_portal_group *se_tpg,
2000 struct se_lun *se_lun)
2001{
2002 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2003
2004 return sbp_update_unit_directory(tpg->tport);
2005}
2006
2007static void sbp_pre_unlink_lun(
2008 struct se_portal_group *se_tpg,
2009 struct se_lun *se_lun)
2010{
2011 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2012 struct sbp_tport *tport = tpg->tport;
2013 int ret;
2014
2015 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
2016 tport->enable = 0;
2017
2018 ret = sbp_update_unit_directory(tport);
2019 if (ret < 0)
2020 pr_err("unlink LUN: failed to update unit directory\n");
2021}
2022
2023static struct se_portal_group *sbp_make_tpg(
2024 struct se_wwn *wwn,
2025 struct config_group *group,
2026 const char *name)
2027{
2028 struct sbp_tport *tport =
2029 container_of(wwn, struct sbp_tport, tport_wwn);
2030
2031 struct sbp_tpg *tpg;
2032 unsigned long tpgt;
2033 int ret;
2034
2035 if (strstr(name, "tpgt_") != name)
2036 return ERR_PTR(-EINVAL);
2037 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
2038 return ERR_PTR(-EINVAL);
2039
2040 if (tport->tpg) {
2041 pr_err("Only one TPG per Unit is possible.\n");
2042 return ERR_PTR(-EBUSY);
2043 }
2044
2045 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
2046 if (!tpg) {
2047 pr_err("Unable to allocate struct sbp_tpg\n");
2048 return ERR_PTR(-ENOMEM);
2049 }
2050
2051 tpg->tport = tport;
2052 tpg->tport_tpgt = tpgt;
2053 tport->tpg = tpg;
2054
2055 /* default attribute values */
2056 tport->enable = 0;
2057 tport->directory_id = -1;
2058 tport->mgt_orb_timeout = 15;
2059 tport->max_reconnect_timeout = 5;
2060 tport->max_logins_per_lun = 1;
2061
2062 tport->mgt_agt = sbp_management_agent_register(tport);
2063 if (IS_ERR(tport->mgt_agt)) {
2064 ret = PTR_ERR(tport->mgt_agt);
e1fe2060 2065 goto out_free_tpg;
2066 }
2067
e4aae5af 2068 ret = core_tpg_register(&sbp_ops, wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
2069 if (ret < 0)
2070 goto out_unreg_mgt_agt;
2071
2072 return &tpg->se_tpg;
2073
2074out_unreg_mgt_agt:
2075 sbp_management_agent_unregister(tport->mgt_agt);
2076out_free_tpg:
2077 tport->tpg = NULL;
2078 kfree(tpg);
2079 return ERR_PTR(ret);
2080}
2081
2082static void sbp_drop_tpg(struct se_portal_group *se_tpg)
2083{
2084 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2085 struct sbp_tport *tport = tpg->tport;
2086
2087 core_tpg_deregister(se_tpg);
2088 sbp_management_agent_unregister(tport->mgt_agt);
2089 tport->tpg = NULL;
2090 kfree(tpg);
2091}
2092
2093static struct se_wwn *sbp_make_tport(
2094 struct target_fabric_configfs *tf,
2095 struct config_group *group,
2096 const char *name)
2097{
2098 struct sbp_tport *tport;
2099 u64 guid = 0;
2100
343d475d 2101 if (sbp_parse_wwn(name, &guid) < 0)
2102 return ERR_PTR(-EINVAL);
2103
2104 tport = kzalloc(sizeof(*tport), GFP_KERNEL);
2105 if (!tport) {
2106 pr_err("Unable to allocate struct sbp_tport\n");
2107 return ERR_PTR(-ENOMEM);
2108 }
2109
2110 tport->guid = guid;
2111 sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
2112
2113 return &tport->tport_wwn;
2114}
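/*
 * Illustrative configfs flow (hypothetical paths, assuming the target
 * subsystem is mounted at /sys/kernel/config): creating
 *   /sys/kernel/config/target/sbp/0123456789abcdef
 * invokes sbp_make_tport() with name "0123456789abcdef", and creating
 *   .../0123456789abcdef/tpgt_1
 * underneath it invokes sbp_make_tpg() with name "tpgt_1".
 */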
2115
2116static void sbp_drop_tport(struct se_wwn *wwn)
2117{
2118 struct sbp_tport *tport =
2119 container_of(wwn, struct sbp_tport, tport_wwn);
2120
2121 kfree(tport);
2122}
2123
2124static ssize_t sbp_wwn_show_attr_version(
2125 struct target_fabric_configfs *tf,
2126 char *page)
2127{
2128 return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
2129}
2130
2131TF_WWN_ATTR_RO(sbp, version);
2132
2133static struct configfs_attribute *sbp_wwn_attrs[] = {
2134 &sbp_wwn_version.attr,
2135 NULL,
2136};
2137
2138static ssize_t sbp_tpg_show_directory_id(
2139 struct se_portal_group *se_tpg,
2140 char *page)
2141{
2142 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2143 struct sbp_tport *tport = tpg->tport;
2144
2145 if (tport->directory_id == -1)
2146 return sprintf(page, "implicit\n");
2147 else
2148 return sprintf(page, "%06x\n", tport->directory_id);
2149}
2150
2151static ssize_t sbp_tpg_store_directory_id(
2152 struct se_portal_group *se_tpg,
2153 const char *page,
2154 size_t count)
2155{
2156 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2157 struct sbp_tport *tport = tpg->tport;
2158 unsigned long val;
2159
2160 if (tport->enable) {
2161 pr_err("Cannot change the directory_id on an active target.\n");
2162 return -EBUSY;
2163 }
2164
2165 if (strstr(page, "implicit") == page) {
2166 tport->directory_id = -1;
2167 } else {
2168 if (kstrtoul(page, 16, &val) < 0)
2169 return -EINVAL;
2170 if (val > 0xffffff)
2171 return -EINVAL;
2172
2173 tport->directory_id = val;
2174 }
2175
2176 return count;
2177}
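/*
 * Example (illustrative): writing "implicit" restores the default
 * behaviour, while writing a hex value such as "0abcde" sets an
 * explicit 24-bit directory ID; both writes are rejected with -EBUSY
 * while the target is enabled.
 */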
2178
2179static ssize_t sbp_tpg_show_enable(
2180 struct se_portal_group *se_tpg,
2181 char *page)
2182{
2183 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2184 struct sbp_tport *tport = tpg->tport;
2185 return sprintf(page, "%d\n", tport->enable);
2186}
2187
2188static ssize_t sbp_tpg_store_enable(
2189 struct se_portal_group *se_tpg,
2190 const char *page,
2191 size_t count)
2192{
2193 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2194 struct sbp_tport *tport = tpg->tport;
2195 unsigned long val;
2196 int ret;
2197
2198 if (kstrtoul(page, 0, &val) < 0)
2199 return -EINVAL;
2200 if ((val != 0) && (val != 1))
2201 return -EINVAL;
2202
2203 if (tport->enable == val)
2204 return count;
2205
2206 if (val) {
2207 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
2208 pr_err("Cannot enable a target with no LUNs!\n");
2209 return -EINVAL;
2210 }
2211 } else {
2212 /* XXX: force-shutdown sessions instead? */
2213 spin_lock_bh(&se_tpg->session_lock);
2214 if (!list_empty(&se_tpg->tpg_sess_list)) {
2215 spin_unlock_bh(&se_tpg->session_lock);
2216 return -EBUSY;
2217 }
2218 spin_unlock_bh(&se_tpg->session_lock);
2219 }
2220
2221 tport->enable = val;
2222
2223 ret = sbp_update_unit_directory(tport);
2224 if (ret < 0) {
2225 pr_err("Could not update Config ROM\n");
2226 return ret;
2227 }
2228
2229 return count;
2230}
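/*
 * Example (illustrative): after linking at least one LUN, writing "1"
 * to the enable attribute publishes the unit directory in the config
 * ROM; writing "0" is refused with -EBUSY while any initiator session
 * remains logged in.
 */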
2231
2232TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
2233TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);
2234
2235static struct configfs_attribute *sbp_tpg_base_attrs[] = {
2236 &sbp_tpg_directory_id.attr,
2237 &sbp_tpg_enable.attr,
2238 NULL,
2239};
2240
2241static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
2242 struct se_portal_group *se_tpg,
2243 char *page)
2244{
2245 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2246 struct sbp_tport *tport = tpg->tport;
2247 return sprintf(page, "%d\n", tport->mgt_orb_timeout);
2248}
2249
2250static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
2251 struct se_portal_group *se_tpg,
2252 const char *page,
2253 size_t count)
2254{
2255 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2256 struct sbp_tport *tport = tpg->tport;
2257 unsigned long val;
2258 int ret;
2259
2260 if (kstrtoul(page, 0, &val) < 0)
2261 return -EINVAL;
2262 if ((val < 1) || (val > 127))
2263 return -EINVAL;
2264
2265 if (tport->mgt_orb_timeout == val)
2266 return count;
2267
2268 tport->mgt_orb_timeout = val;
2269
2270 ret = sbp_update_unit_directory(tport);
2271 if (ret < 0)
2272 return ret;
2273
2274 return count;
2275}
2276
2277static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
2278 struct se_portal_group *se_tpg,
2279 char *page)
2280{
2281 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2282 struct sbp_tport *tport = tpg->tport;
2283 return sprintf(page, "%d\n", tport->max_reconnect_timeout);
2284}
2285
2286static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
2287 struct se_portal_group *se_tpg,
2288 const char *page,
2289 size_t count)
2290{
2291 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2292 struct sbp_tport *tport = tpg->tport;
2293 unsigned long val;
2294 int ret;
2295
2296 if (kstrtoul(page, 0, &val) < 0)
2297 return -EINVAL;
2298 if ((val < 1) || (val > 32767))
2299 return -EINVAL;
2300
2301 if (tport->max_reconnect_timeout == val)
2302 return count;
2303
2304 tport->max_reconnect_timeout = val;
2305
2306 ret = sbp_update_unit_directory(tport);
2307 if (ret < 0)
2308 return ret;
2309
2310 return count;
2311}
2312
2313static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
2314 struct se_portal_group *se_tpg,
2315 char *page)
2316{
2317 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2318 struct sbp_tport *tport = tpg->tport;
2319 return sprintf(page, "%d\n", tport->max_logins_per_lun);
2320}
2321
2322static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
2323 struct se_portal_group *se_tpg,
2324 const char *page,
2325 size_t count)
2326{
2327 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2328 struct sbp_tport *tport = tpg->tport;
2329 unsigned long val;
2330
2331 if (kstrtoul(page, 0, &val) < 0)
2332 return -EINVAL;
2333 if ((val < 1) || (val > 127))
2334 return -EINVAL;
2335
2336 /* XXX: also check against current count? */
2337
2338 tport->max_logins_per_lun = val;
2339
2340 return count;
2341}
2342
2343TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
2344TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
2345TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);
2346
2347static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
2348 &sbp_tpg_attrib_mgt_orb_timeout.attr,
2349 &sbp_tpg_attrib_max_reconnect_timeout.attr,
2350 &sbp_tpg_attrib_max_logins_per_lun.attr,
2351 NULL,
2352};
2353
2354static const struct target_core_fabric_ops sbp_ops = {
2355 .module = THIS_MODULE,
2356 .name = "sbp",
a511ce33 2357 .get_fabric_name = sbp_get_fabric_name,
2358 .tpg_get_wwn = sbp_get_fabric_wwn,
2359 .tpg_get_tag = sbp_get_tag,
2360 .tpg_check_demo_mode = sbp_check_true,
2361 .tpg_check_demo_mode_cache = sbp_check_true,
2362 .tpg_check_demo_mode_write_protect = sbp_check_false,
2363 .tpg_check_prod_mode_write_protect = sbp_check_false,
2364 .tpg_get_inst_index = sbp_tpg_get_inst_index,
2365 .release_cmd = sbp_release_cmd,
2366 .shutdown_session = sbp_shutdown_session,
2367 .close_session = sbp_close_session,
2368 .sess_get_index = sbp_sess_get_index,
2369 .write_pending = sbp_write_pending,
2370 .write_pending_status = sbp_write_pending_status,
2371 .set_default_node_attributes = sbp_set_default_node_attrs,
2372 .get_cmd_state = sbp_get_cmd_state,
2373 .queue_data_in = sbp_queue_data_in,
2374 .queue_status = sbp_queue_status,
2375 .queue_tm_rsp = sbp_queue_tm_rsp,
131e6abc 2376 .aborted_task = sbp_aborted_task,
2377 .check_stop_free = sbp_check_stop_free,
2378
2379 .fabric_make_wwn = sbp_make_tport,
2380 .fabric_drop_wwn = sbp_drop_tport,
2381 .fabric_make_tpg = sbp_make_tpg,
2382 .fabric_drop_tpg = sbp_drop_tpg,
2383 .fabric_post_link = sbp_post_link_lun,
2384 .fabric_pre_unlink = sbp_pre_unlink_lun,
2385 .fabric_make_np = NULL,
2386 .fabric_drop_np = NULL,
c7d6a803 2387 .fabric_init_nodeacl = sbp_init_nodeacl,
a511ce33 2388
2389 .tfc_wwn_attrs = sbp_wwn_attrs,
2390 .tfc_tpg_base_attrs = sbp_tpg_base_attrs,
2391 .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
2392};
2393
2394static int __init sbp_init(void)
2395{
9ac8928e 2396 return target_register_template(&sbp_ops);
2397}
2398
63b91d5a 2399static void __exit sbp_exit(void)
a511ce33 2400{
9ac8928e 2401 target_unregister_template(&sbp_ops);
2402}
2403
2404MODULE_DESCRIPTION("FireWire SBP fabric driver");
2405MODULE_LICENSE("GPL");
2406module_init(sbp_init);
2407module_exit(sbp_exit);