Commit | Line | Data |
---|---|---|
853e2bd2 BG |
1 | /* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver. |
2 | * This file contains the low level functions that interact | |
3 | * with 57712 FCoE firmware. | |
4 | * | |
cf122191 | 5 | * Copyright (c) 2008 - 2013 Broadcom Corporation |
853e2bd2 BG |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License as published by | |
9 | * the Free Software Foundation. | |
10 | * | |
11 | * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) | |
12 | */ | |
13 | ||
14 | #include "bnx2fc.h" | |
15 | ||
16 | DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); | |
17 | ||
18 | static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, | |
19 | struct fcoe_kcqe *new_cqe_kcqe); | |
20 | static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, | |
21 | struct fcoe_kcqe *ofld_kcqe); | |
22 | static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, | |
23 | struct fcoe_kcqe *ofld_kcqe); | |
24 | static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); | |
25 | static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, | |
aea71a02 | 26 | struct fcoe_kcqe *destroy_kcqe); |
853e2bd2 BG |
27 | |
28 | int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) | |
29 | { | |
30 | struct fcoe_kwqe_stat stat_req; | |
31 | struct kwqe *kwqe_arr[2]; | |
32 | int num_kwqes = 1; | |
33 | int rc = 0; | |
34 | ||
35 | memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat)); | |
36 | stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT; | |
37 | stat_req.hdr.flags = | |
38 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
39 | ||
40 | stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma; | |
41 | stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32); | |
42 | ||
43 | kwqe_arr[0] = (struct kwqe *) &stat_req; | |
44 | ||
45 | if (hba->cnic && hba->cnic->submit_kwqes) | |
46 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
47 | ||
48 | return rc; | |
49 | } | |
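
Every buffer address handed to the firmware in this file is split the same way: the 64-bit DMA address goes out as two 32-bit halves (`stat_params_addr_lo`/`_hi` here, and likewise for the task list, queues, and hash tables below). A minimal standalone sketch of that arithmetic, with illustrative values:

```c
/* Sketch of the lo/hi DMA-address split used throughout this file.
 * The address value is made up for illustration. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma = 0x0000123456789abcULL;	/* example bus address */
	uint32_t lo  = (uint32_t) dma;		/* low 32 bits  */
	uint32_t hi  = (uint32_t) (dma >> 32);	/* high 32 bits */

	/* the firmware reassembles the two halves the same way */
	assert((((uint64_t) hi << 32) | lo) == dma);
	return 0;
}
```
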
50 | ||
51 | /** | |
52 | * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w | |
53 | * | |
54 | * @hba: adapter structure pointer | |
55 | * | |
56 | * Send down FCoE firmware init KWQEs which initiate the initial handshake | |
57 | * with the f/w. | |
58 | * | |
59 | */ | |
60 | int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) | |
61 | { | |
62 | struct fcoe_kwqe_init1 fcoe_init1; | |
63 | struct fcoe_kwqe_init2 fcoe_init2; | |
64 | struct fcoe_kwqe_init3 fcoe_init3; | |
65 | struct kwqe *kwqe_arr[3]; | |
66 | int num_kwqes = 3; | |
67 | int rc = 0; | |
68 | ||
69 | if (!hba->cnic) { | |
aea71a02 | 70 | printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n"); |
853e2bd2 BG |
71 | return -ENODEV; |
72 | } | |
73 | ||
74 | /* fill init1 KWQE */ | |
75 | memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); | |
76 | fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; | |
77 | fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
78 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
79 | ||
0eb43b4b | 80 | fcoe_init1.num_tasks = hba->max_tasks; |
853e2bd2 BG |
81 | fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; |
82 | fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; | |
83 | fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; | |
84 | fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; | |
85 | fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; | |
86 | fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); | |
87 | fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; | |
88 | fcoe_init1.task_list_pbl_addr_hi = | |
89 | (u32) ((u64) hba->task_ctx_bd_dma >> 32); | |
1294bfe6 | 90 | fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; |
853e2bd2 BG |
91 | |
92 | fcoe_init1.flags = (PAGE_SHIFT << | |
93 | FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); | |
94 | ||
95 | fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; | |
96 | ||
97 | /* fill init2 KWQE */ | |
98 | memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); | |
99 | fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; | |
100 | fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
101 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
102 | ||
619c5cb6 VZ |
103 | fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; |
104 | fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; | |
105 | ||
aea71a02 | 106 | |
853e2bd2 BG |
107 | fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; |
108 | fcoe_init2.hash_tbl_pbl_addr_hi = (u32) | |
109 | ((u64) hba->hash_tbl_pbl_dma >> 32); | |
110 | ||
111 | fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; | |
112 | fcoe_init2.t2_hash_tbl_addr_hi = (u32) | |
113 | ((u64) hba->t2_hash_tbl_dma >> 32); | |
114 | ||
115 | fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; | |
116 | fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) | |
117 | ((u64) hba->t2_hash_tbl_ptr_dma >> 32); | |
118 | ||
119 | fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; | |
120 | ||
121 | /* fill init3 KWQE */ | |
122 | memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); | |
123 | fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; | |
124 | fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
125 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
126 | fcoe_init3.error_bit_map_lo = 0xffffffff; | |
127 | fcoe_init3.error_bit_map_hi = 0xffffffff; | |
128 | ||
353c2ade BPG |
129 | /* |
130 | * enable both cached connection and cached tasks | |
131 | * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both | |
132 | */ | |
133 | fcoe_init3.perf_config = 3; | |
853e2bd2 BG |
134 | |
135 | kwqe_arr[0] = (struct kwqe *) &fcoe_init1; | |
136 | kwqe_arr[1] = (struct kwqe *) &fcoe_init2; | |
137 | kwqe_arr[2] = (struct kwqe *) &fcoe_init3; | |
138 | ||
139 | if (hba->cnic && hba->cnic->submit_kwqes) | |
140 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
141 | ||
142 | return rc; | |
143 | } | |
144 | int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) | |
145 | { | |
146 | struct fcoe_kwqe_destroy fcoe_destroy; | |
147 | struct kwqe *kwqe_arr[2]; | |
148 | int num_kwqes = 1; | |
149 | int rc = -1; | |
150 | ||
151 | /* fill destroy KWQE */ | |
152 | memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); | |
153 | fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; | |
154 | fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << | |
155 | FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
156 | kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; | |
157 | ||
158 | if (hba->cnic && hba->cnic->submit_kwqes) | |
159 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
160 | return rc; | |
161 | } | |
162 | ||
163 | /** | |
164 | * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process | |
165 | * | |
166 | * @port: port structure pointer | |
167 | * @tgt: bnx2fc_rport structure pointer | |
168 | */ | |
169 | int bnx2fc_send_session_ofld_req(struct fcoe_port *port, | |
170 | struct bnx2fc_rport *tgt) | |
171 | { | |
172 | struct fc_lport *lport = port->lport; | |
aea71a02 | 173 | struct bnx2fc_interface *interface = port->priv; |
fd8f8902 | 174 | struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); |
aea71a02 | 175 | struct bnx2fc_hba *hba = interface->hba; |
853e2bd2 BG |
176 | struct kwqe *kwqe_arr[4]; |
177 | struct fcoe_kwqe_conn_offload1 ofld_req1; | |
178 | struct fcoe_kwqe_conn_offload2 ofld_req2; | |
179 | struct fcoe_kwqe_conn_offload3 ofld_req3; | |
180 | struct fcoe_kwqe_conn_offload4 ofld_req4; | |
181 | struct fc_rport_priv *rdata = tgt->rdata; | |
182 | struct fc_rport *rport = tgt->rport; | |
183 | int num_kwqes = 4; | |
184 | u32 port_id; | |
185 | int rc = 0; | |
186 | u16 conn_id; | |
187 | ||
188 | /* Initialize offload request 1 structure */ | |
189 | memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); | |
190 | ||
191 | ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; | |
192 | ofld_req1.hdr.flags = | |
193 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
194 | ||
195 | ||
196 | conn_id = (u16)tgt->fcoe_conn_id; | |
197 | ofld_req1.fcoe_conn_id = conn_id; | |
198 | ||
199 | ||
200 | ofld_req1.sq_addr_lo = (u32) tgt->sq_dma; | |
201 | ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); | |
202 | ||
203 | ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; | |
204 | ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); | |
205 | ||
206 | ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; | |
207 | ofld_req1.rq_first_pbe_addr_hi = | |
208 | (u32)((u64) tgt->rq_dma >> 32); | |
209 | ||
210 | ofld_req1.rq_prod = 0x8000; | |
211 | ||
212 | /* Initialize offload request 2 structure */ | |
213 | memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); | |
214 | ||
215 | ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; | |
216 | ofld_req2.hdr.flags = | |
217 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
218 | ||
219 | ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; | |
220 | ||
221 | ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; | |
222 | ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); | |
223 | ||
224 | ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; | |
225 | ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); | |
226 | ||
227 | ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; | |
228 | ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); | |
229 | ||
230 | /* Initialize offload request 3 structure */ | |
231 | memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); | |
232 | ||
233 | ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; | |
234 | ofld_req3.hdr.flags = | |
235 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
236 | ||
aea71a02 | 237 | ofld_req3.vlan_tag = interface->vlan_id << |
853e2bd2 BG |
238 | FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; |
239 | ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; | |
240 | ||
241 | port_id = fc_host_port_id(lport->host); | |
242 | if (port_id == 0) { | |
243 | BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); | |
244 | return -EINVAL; | |
245 | } | |
246 | ||
247 | /* | |
248 | * Store s_id of the initiator for further reference. This will | |
249 | * be used during disable/destroy in linkdown processing, since | |
250 | * when the lport is reset, the port_id is also reset to 0 | |
251 | */ | |
252 | tgt->sid = port_id; | |
253 | ofld_req3.s_id[0] = (port_id & 0x000000FF); | |
254 | ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; | |
255 | ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; | |
256 | ||
257 | port_id = rport->port_id; | |
258 | ofld_req3.d_id[0] = (port_id & 0x000000FF); | |
259 | ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; | |
260 | ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; | |
261 | ||
262 | ofld_req3.tx_total_conc_seqs = rdata->max_seq; | |
263 | ||
264 | ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; | |
265 | ofld_req3.rx_max_fc_pay_len = lport->mfs; | |
266 | ||
267 | ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; | |
268 | ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; | |
269 | ofld_req3.rx_open_seqs_exch_c3 = 1; | |
270 | ||
271 | ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; | |
272 | ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); | |
273 | ||
274 | /* set mul_n_port_ids supported flag to 0, until it is supported */ | |
275 | ofld_req3.flags = 0; | |
276 | /* | |
277 | ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << | |
278 | FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); | |
279 | */ | |
280 | /* Info from PLOGI response */ | |
281 | ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) << | |
282 | FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); | |
283 | ||
284 | ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << | |
285 | FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); | |
286 | ||
b252f4c7 BPG |
287 | /* |
288 | * Info from PRLI response, this info is used for sequence level error | |
289 | * recovery support | |
290 | */ | |
291 | if (tgt->dev_type == TYPE_TAPE) { | |
292 | ofld_req3.flags |= 1 << | |
293 | FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT; | |
294 | ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED) | |
295 | ? 1 : 0) << | |
296 | FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT); | |
297 | } | |
298 | ||
853e2bd2 | 299 | /* vlan flag */ |
aea71a02 | 300 | ofld_req3.flags |= (interface->vlan_enabled << |
853e2bd2 BG |
301 | FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); |
302 | ||
d7558148 | 303 | /* C2_VALID and ACK flags are not set as they are not supported */ |
853e2bd2 BG |
304 | |
305 | ||
306 | /* Initialize offload request 4 structure */ | |
307 | memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); | |
308 | ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; | |
309 | ofld_req4.hdr.flags = | |
310 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
311 | ||
312 | ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; | |
313 | ||
314 | ||
619c5cb6 | 315 | ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5]; |
853e2bd2 | 316 | /* local mac */ |
619c5cb6 VZ |
317 | ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4]; |
318 | ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3]; | |
319 | ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; | |
320 | ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; | |
321 | ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; | |
fd8f8902 | 322 | ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; |
aea71a02 | 323 | /* fcf mac */ |
fd8f8902 RL |
324 | ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; |
325 | ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; | |
326 | ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; | |
327 | ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; | |
328 | ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; | |
853e2bd2 BG |
329 | |
330 | ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; | |
331 | ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); | |
332 | ||
333 | ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; | |
334 | ofld_req4.confq_pbl_base_addr_hi = | |
335 | (u32)((u64) tgt->confq_pbl_dma >> 32); | |
336 | ||
337 | kwqe_arr[0] = (struct kwqe *) &ofld_req1; | |
338 | kwqe_arr[1] = (struct kwqe *) &ofld_req2; | |
339 | kwqe_arr[2] = (struct kwqe *) &ofld_req3; | |
340 | kwqe_arr[3] = (struct kwqe *) &ofld_req4; | |
341 | ||
342 | if (hba->cnic && hba->cnic->submit_kwqes) | |
343 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
344 | ||
345 | return rc; | |
346 | } | |
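
Two packing conventions recur in the offload request above and in the enable/disable requests that follow: a 24-bit FC port ID is written out one byte at a time starting with the least significant byte, and a 6-byte MAC address is reversed into lo/mid/hi 16-bit pairs. A standalone sketch of both; all values are made up for illustration:

```c
/* Sketch of the s_id[] and src/dst MAC packing done by ofld_req3
 * and ofld_req4 above. Example values only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t port_id = 0x0a1b2c;		/* example 24-bit FC_ID */
	uint8_t s_id[3];
	uint8_t mac[6] = { 0x0e, 0xfc, 0x00, 0x0a, 0x1b, 0x2c };
	uint8_t lo[2], mid[2], hi[2];

	s_id[0] = port_id & 0x000000FF;		/* 0x2c */
	s_id[1] = (port_id & 0x0000FF00) >> 8;	/* 0x1b */
	s_id[2] = (port_id & 0x00FF0000) >> 16;	/* 0x0a */

	lo[0]  = mac[5]; lo[1]  = mac[4];	/* last two octets   */
	mid[0] = mac[3]; mid[1] = mac[2];	/* middle two octets */
	hi[0]  = mac[1]; hi[1]  = mac[0];	/* first two octets  */

	printf("s_id %02x.%02x.%02x, mac hi %02x%02x mid %02x%02x lo %02x%02x\n",
	       s_id[2], s_id[1], s_id[0],
	       hi[1], hi[0], mid[1], mid[0], lo[1], lo[0]);
	return 0;
}
```
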
347 | ||
348 | /** | |
349 | * bnx2fc_send_session_enable_req - initiates FCoE Session enablement | |
350 | * | |
351 | * @port: port structure pointer | |
352 | * @tgt: bnx2fc_rport structure pointer | |
353 | */ | |
e7f4fed5 | 354 | int bnx2fc_send_session_enable_req(struct fcoe_port *port, |
853e2bd2 BG |
355 | struct bnx2fc_rport *tgt) |
356 | { | |
357 | struct kwqe *kwqe_arr[2]; | |
aea71a02 | 358 | struct bnx2fc_interface *interface = port->priv; |
fd8f8902 | 359 | struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); |
aea71a02 | 360 | struct bnx2fc_hba *hba = interface->hba; |
853e2bd2 BG |
361 | struct fcoe_kwqe_conn_enable_disable enbl_req; |
362 | struct fc_lport *lport = port->lport; | |
363 | struct fc_rport *rport = tgt->rport; | |
364 | int num_kwqes = 1; | |
365 | int rc = 0; | |
366 | u32 port_id; | |
367 | ||
368 | memset(&enbl_req, 0x00, | |
369 | sizeof(struct fcoe_kwqe_conn_enable_disable)); | |
370 | enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; | |
371 | enbl_req.hdr.flags = | |
372 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
373 | ||
619c5cb6 | 374 | enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5]; |
853e2bd2 | 375 | /* local mac */ |
619c5cb6 VZ |
376 | enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4]; |
377 | enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3]; | |
378 | enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2]; | |
379 | enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1]; | |
380 | enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; | |
381 | memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); | |
382 | ||
fd8f8902 RL |
383 | enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; |
384 | enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; | |
385 | enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; | |
386 | enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; | |
387 | enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; | |
388 | enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; | |
853e2bd2 BG |
389 | |
390 | port_id = fc_host_port_id(lport->host); | |
391 | if (port_id != tgt->sid) { | |
392 | printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x, " | |
393 | "sid = 0x%x\n", port_id, tgt->sid); | |
394 | port_id = tgt->sid; | |
395 | } | |
396 | enbl_req.s_id[0] = (port_id & 0x000000FF); | |
397 | enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; | |
398 | enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; | |
399 | ||
400 | port_id = rport->port_id; | |
401 | enbl_req.d_id[0] = (port_id & 0x000000FF); | |
402 | enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; | |
403 | enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; | |
aea71a02 | 404 | enbl_req.vlan_tag = interface->vlan_id << |
853e2bd2 BG |
405 | FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; |
406 | enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; | |
aea71a02 | 407 | enbl_req.vlan_flag = interface->vlan_enabled; |
853e2bd2 BG |
408 | enbl_req.context_id = tgt->context_id; |
409 | enbl_req.conn_id = tgt->fcoe_conn_id; | |
410 | ||
411 | kwqe_arr[0] = (struct kwqe *) &enbl_req; | |
412 | ||
413 | if (hba->cnic && hba->cnic->submit_kwqes) | |
414 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
415 | return rc; | |
416 | } | |
417 | ||
418 | /** | |
419 | * bnx2fc_send_session_disable_req - initiates FCoE Session disable | |
420 | * | |
421 | * @port: port structure pointer | |
422 | * @tgt: bnx2fc_rport structure pointer | |
423 | */ | |
424 | int bnx2fc_send_session_disable_req(struct fcoe_port *port, | |
425 | struct bnx2fc_rport *tgt) | |
426 | { | |
aea71a02 | 427 | struct bnx2fc_interface *interface = port->priv; |
fd8f8902 | 428 | struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); |
aea71a02 | 429 | struct bnx2fc_hba *hba = interface->hba; |
853e2bd2 BG |
430 | struct fcoe_kwqe_conn_enable_disable disable_req; |
431 | struct kwqe *kwqe_arr[2]; | |
432 | struct fc_rport *rport = tgt->rport; | |
433 | int num_kwqes = 1; | |
434 | int rc = 0; | |
435 | u32 port_id; | |
436 | ||
437 | memset(&disable_req, 0x00, | |
438 | sizeof(struct fcoe_kwqe_conn_enable_disable)); | |
439 | disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; | |
440 | disable_req.hdr.flags = | |
441 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
442 | ||
619c5cb6 VZ |
443 | disable_req.src_mac_addr_lo[0] = tgt->src_addr[5]; |
444 | disable_req.src_mac_addr_lo[1] = tgt->src_addr[4]; | |
445 | disable_req.src_mac_addr_mid[0] = tgt->src_addr[3]; | |
446 | disable_req.src_mac_addr_mid[1] = tgt->src_addr[2]; | |
447 | disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; | |
448 | disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; | |
853e2bd2 | 449 | |
fd8f8902 RL |
450 | disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; |
451 | disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; | |
452 | disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; | |
453 | disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; | |
454 | disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; | |
455 | disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; | |
853e2bd2 BG |
456 | |
457 | port_id = tgt->sid; | |
458 | disable_req.s_id[0] = (port_id & 0x000000FF); | |
459 | disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; | |
460 | disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; | |
461 | ||
462 | ||
463 | port_id = rport->port_id; | |
464 | disable_req.d_id[0] = (port_id & 0x000000FF); | |
465 | disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; | |
466 | disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; | |
467 | disable_req.context_id = tgt->context_id; | |
468 | disable_req.conn_id = tgt->fcoe_conn_id; | |
aea71a02 | 469 | disable_req.vlan_tag = interface->vlan_id << |
853e2bd2 BG |
470 | FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; |
471 | disable_req.vlan_tag |= | |
472 | 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; | |
aea71a02 | 473 | disable_req.vlan_flag = interface->vlan_enabled; |
853e2bd2 BG |
474 | |
475 | kwqe_arr[0] = (struct kwqe *) &disable_req; | |
476 | ||
477 | if (hba->cnic && hba->cnic->submit_kwqes) | |
478 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
479 | ||
480 | return rc; | |
481 | } | |
482 | ||
483 | /** | |
484 | * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy | |
485 | * | |
486 | * @hba: adapter structure pointer | |
487 | * @tgt: bnx2fc_rport structure pointer | |
488 | */ | |
489 | int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, | |
490 | struct bnx2fc_rport *tgt) | |
491 | { | |
492 | struct fcoe_kwqe_conn_destroy destroy_req; | |
493 | struct kwqe *kwqe_arr[2]; | |
494 | int num_kwqes = 1; | |
495 | int rc = 0; | |
496 | ||
497 | memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); | |
498 | destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; | |
499 | destroy_req.hdr.flags = | |
500 | (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); | |
501 | ||
502 | destroy_req.context_id = tgt->context_id; | |
503 | destroy_req.conn_id = tgt->fcoe_conn_id; | |
504 | ||
505 | kwqe_arr[0] = (struct kwqe *) &destroy_req; | |
506 | ||
507 | if (hba->cnic && hba->cnic->submit_kwqes) | |
508 | rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); | |
509 | ||
510 | return rc; | |
511 | } | |
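
Taken together, the session helpers above drive the firmware connection lifecycle in a fixed order: offload, enable, disable, destroy, with the KCQE completion handlers later in this file setting a flag bit as each stage is acknowledged. A toy model of that flag sequence; the bit values are illustrative, not the driver's:

```c
/* Illustrative model of the per-session state bits the completion
 * handlers below manage. Bit assignments are made up. */
#include <stdio.h>

enum {
	FLAG_OFFLOADED = 1 << 0,	/* set by the offload completion */
	FLAG_ENABLED   = 1 << 1,	/* set by the enable completion  */
	FLAG_DISABLED  = 1 << 2,	/* set by the disable completion */
	FLAG_DESTROYED = 1 << 3	/* set by the destroy completion */
};

int main(void)
{
	unsigned int flags = 0;

	flags |= FLAG_OFFLOADED;			/* OFFLOAD_CONN1..4 acked */
	flags |= FLAG_ENABLED;				/* ENABLE_CONN acked      */
	flags &= ~(FLAG_OFFLOADED | FLAG_ENABLED);	/* session going away     */
	flags |= FLAG_DISABLED;				/* DISABLE_CONN acked     */
	flags &= ~FLAG_DISABLED;
	flags |= FLAG_DESTROYED;			/* DESTROY_CONN acked     */

	printf("session torn down, flags 0x%x\n", flags);
	return 0;
}
```
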
512 | ||
d36b3279 BPG |
513 | static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport) |
514 | { | |
515 | struct bnx2fc_lport *blport; | |
516 | ||
517 | spin_lock_bh(&hba->hba_lock); | |
518 | list_for_each_entry(blport, &hba->vports, list) { | |
519 | if (blport->lport == lport) { | |
520 | spin_unlock_bh(&hba->hba_lock); | |
521 | return true; | |
522 | } | |
523 | } | |
524 | spin_unlock_bh(&hba->hba_lock); | |
525 | return false; | |
526 | ||
527 | } | |
528 | ||
529 | ||
853e2bd2 BG |
530 | static void bnx2fc_unsol_els_work(struct work_struct *work) |
531 | { | |
532 | struct bnx2fc_unsol_els *unsol_els; | |
533 | struct fc_lport *lport; | |
d36b3279 | 534 | struct bnx2fc_hba *hba; |
853e2bd2 BG |
535 | struct fc_frame *fp; |
536 | ||
537 | unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); | |
538 | lport = unsol_els->lport; | |
539 | fp = unsol_els->fp; | |
d36b3279 BPG |
540 | hba = unsol_els->hba; |
541 | if (is_valid_lport(hba, lport)) | |
542 | fc_exch_recv(lport, fp); | |
853e2bd2 BG |
543 | kfree(unsol_els); |
544 | } | |
545 | ||
546 | void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, | |
547 | unsigned char *buf, | |
548 | u32 frame_len, u16 l2_oxid) | |
549 | { | |
550 | struct fcoe_port *port = tgt->port; | |
551 | struct fc_lport *lport = port->lport; | |
aea71a02 | 552 | struct bnx2fc_interface *interface = port->priv; |
853e2bd2 BG |
553 | struct bnx2fc_unsol_els *unsol_els; |
554 | struct fc_frame_header *fh; | |
555 | struct fc_frame *fp; | |
556 | struct sk_buff *skb; | |
557 | u32 payload_len; | |
558 | u32 crc; | |
559 | u8 op; | |
560 | ||
561 | ||
562 | unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); | |
563 | if (!unsol_els) { | |
564 | BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); | |
565 | return; | |
566 | } | |
567 | ||
568 | BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", | |
569 | l2_oxid, frame_len); | |
570 | ||
571 | payload_len = frame_len - sizeof(struct fc_frame_header); | |
572 | ||
573 | fp = fc_frame_alloc(lport, payload_len); | |
574 | if (!fp) { | |
575 | printk(KERN_ERR PFX "fc_frame_alloc failure\n"); | |
5c2dce26 | 576 | kfree(unsol_els); |
853e2bd2 BG |
577 | return; |
578 | } | |
579 | ||
580 | fh = (struct fc_frame_header *) fc_frame_header_get(fp); | |
581 | /* Copy FC Frame header and payload into the frame */ | |
582 | memcpy(fh, buf, frame_len); | |
583 | ||
584 | if (l2_oxid != FC_XID_UNKNOWN) | |
585 | fh->fh_ox_id = htons(l2_oxid); | |
586 | ||
587 | skb = fp_skb(fp); | |
588 | ||
589 | if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || | |
590 | (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { | |
591 | ||
592 | if (fh->fh_type == FC_TYPE_ELS) { | |
593 | op = fc_frame_payload_op(fp); | |
594 | if ((op == ELS_TEST) || (op == ELS_ESTC) || | |
595 | (op == ELS_FAN) || (op == ELS_CSU)) { | |
596 | /* | |
597 | * No need to reply for these | |
598 | * ELS requests | |
599 | */ | |
600 | printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); | |
601 | kfree_skb(skb); | |
5c2dce26 | 602 | kfree(unsol_els); |
853e2bd2 BG |
603 | return; |
604 | } | |
605 | } | |
606 | crc = fcoe_fc_crc(fp); | |
607 | fc_frame_init(fp); | |
608 | fr_dev(fp) = lport; | |
609 | fr_sof(fp) = FC_SOF_I3; | |
610 | fr_eof(fp) = FC_EOF_T; | |
611 | fr_crc(fp) = cpu_to_le32(~crc); | |
612 | unsol_els->lport = lport; | |
aea71a02 | 613 | unsol_els->hba = interface->hba; |
853e2bd2 BG |
614 | unsol_els->fp = fp; |
615 | INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); | |
616 | queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); | |
617 | } else { | |
618 | BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); | |
619 | kfree_skb(skb); | |
5c2dce26 | 620 | kfree(unsol_els); |
853e2bd2 BG |
621 | } |
622 | } | |
623 | ||
624 | static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) | |
625 | { | |
626 | u8 num_rq; | |
627 | struct fcoe_err_report_entry *err_entry; | |
628 | unsigned char *rq_data; | |
629 | unsigned char *buf = NULL, *buf1; | |
630 | int i; | |
631 | u16 xid; | |
632 | u32 frame_len, len; | |
633 | struct bnx2fc_cmd *io_req = NULL; | |
634 | struct fcoe_task_ctx_entry *task, *task_page; | |
aea71a02 BPG |
635 | struct bnx2fc_interface *interface = tgt->port->priv; |
636 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
637 | int task_idx, index; |
638 | int rc = 0; | |
7b594769 BPG |
639 | u64 err_warn_bit_map; |
640 | u8 err_warn = 0xff; | |
853e2bd2 BG |
641 | |
642 | ||
643 | BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); | |
644 | switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { | |
645 | case FCOE_UNSOLICITED_FRAME_CQE_TYPE: | |
646 | frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> | |
647 | FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; | |
648 | ||
649 | num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; | |
650 | ||
68695973 | 651 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 | 652 | rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); |
68695973 NS |
653 | spin_unlock_bh(&tgt->tgt_lock); |
654 | ||
853e2bd2 BG |
655 | if (rq_data) { |
656 | buf = rq_data; | |
657 | } else { | |
658 | buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), | |
659 | GFP_ATOMIC); | |
660 | ||
661 | if (!buf1) { | |
662 | BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); | |
663 | break; | |
664 | } | |
665 | ||
666 | for (i = 0; i < num_rq; i++) { | |
68695973 | 667 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
668 | rq_data = (unsigned char *) |
669 | bnx2fc_get_next_rqe(tgt, 1); | |
68695973 | 670 | spin_unlock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
671 | len = BNX2FC_RQ_BUF_SZ; |
672 | memcpy(buf1, rq_data, len); | |
673 | buf1 += len; | |
674 | } | |
675 | } | |
676 | bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, | |
677 | FC_XID_UNKNOWN); | |
678 | ||
679 | if (buf != rq_data) | |
680 | kfree(buf); | |
68695973 | 681 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 | 682 | bnx2fc_return_rqe(tgt, num_rq); |
68695973 | 683 | spin_unlock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
684 | break; |
685 | ||
686 | case FCOE_ERROR_DETECTION_CQE_TYPE: | |
687 | /* | |
68695973 NS |
688 | * In case of error reporting CQE a single RQ entry |
689 | * is consumed. | |
853e2bd2 BG |
690 | */ |
691 | spin_lock_bh(&tgt->tgt_lock); | |
692 | num_rq = 1; | |
693 | err_entry = (struct fcoe_err_report_entry *) | |
694 | bnx2fc_get_next_rqe(tgt, 1); | |
695 | xid = err_entry->fc_hdr.ox_id; | |
696 | BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); | |
697 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", | |
619c5cb6 VZ |
698 | err_entry->data.err_warn_bitmap_hi, |
699 | err_entry->data.err_warn_bitmap_lo); | |
853e2bd2 | 700 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", |
619c5cb6 | 701 | err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); |
853e2bd2 | 702 | |
853e2bd2 | 703 | |
0eb43b4b | 704 | if (xid > hba->max_xid) { |
853e2bd2 BG |
705 | BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", |
706 | xid); | |
7b594769 | 707 | goto ret_err_rqe; |
853e2bd2 BG |
708 | } |
709 | ||
710 | task_idx = xid / BNX2FC_TASKS_PER_PAGE; | |
711 | index = xid % BNX2FC_TASKS_PER_PAGE; | |
712 | task_page = (struct fcoe_task_ctx_entry *) | |
aea71a02 | 713 | hba->task_ctx[task_idx]; |
853e2bd2 BG |
714 | task = &(task_page[index]); |
715 | ||
716 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; | |
7b594769 BPG |
717 | if (!io_req) |
718 | goto ret_err_rqe; | |
853e2bd2 BG |
719 | |
720 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) { | |
721 | printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); | |
7b594769 | 722 | goto ret_err_rqe; |
853e2bd2 BG |
723 | } |
724 | ||
725 | if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, | |
726 | &io_req->req_flags)) { | |
727 | BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " | |
728 | "progress.. ignore unsol err\n"); | |
7b594769 BPG |
729 | goto ret_err_rqe; |
730 | } | |
731 | ||
732 | err_warn_bit_map = (u64) | |
733 | ((u64)err_entry->data.err_warn_bitmap_hi << 32) | | |
734 | (u64)err_entry->data.err_warn_bitmap_lo; | |
735 | for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { | |
736 | if (err_warn_bit_map & (u64)((u64)1 << i)) { | |
737 | err_warn = i; | |
738 | break; | |
739 | } | |
853e2bd2 BG |
740 | } |
741 | ||
742 | /* | |
743 | * If ABTS is already in progress, and FW error is | |
744 | * received after that, do not cancel the timeout_work | |
745 | * and let the error recovery continue by explicitly | |
746 | * logging out the target, when the ABTS eventually | |
747 | * times out. | |
748 | */ | |
7b594769 | 749 | if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { |
853e2bd2 BG |
750 | printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " |
751 | "in ABTS processing\n", xid); | |
7b594769 BPG |
752 | goto ret_err_rqe; |
753 | } | |
754 | BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn); | |
755 | if (tgt->dev_type != TYPE_TAPE) | |
756 | goto skip_rec; | |
757 | switch (err_warn) { | |
758 | case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION: | |
759 | case FCOE_ERROR_CODE_DATA_OOO_RO: | |
760 | case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT: | |
761 | case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET: | |
762 | case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ: | |
763 | case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET: | |
764 | BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n", | |
765 | xid); | |
7b594769 BPG |
766 | memcpy(&io_req->err_entry, err_entry, |
767 | sizeof(struct fcoe_err_report_entry)); | |
768 | if (!test_bit(BNX2FC_FLAG_SRR_SENT, | |
769 | &io_req->req_flags)) { | |
770 | spin_unlock_bh(&tgt->tgt_lock); | |
771 | rc = bnx2fc_send_rec(io_req); | |
772 | spin_lock_bh(&tgt->tgt_lock); | |
773 | ||
774 | if (rc) | |
775 | goto skip_rec; | |
776 | } else | |
777 | printk(KERN_ERR PFX "SRR in progress\n"); | |
778 | goto ret_err_rqe; | |
779 | break; | |
780 | default: | |
781 | break; | |
782 | } | |
783 | ||
784 | skip_rec: | |
785 | set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags); | |
786 | /* | |
787 | * Cancel the timeout_work, as we received IO | |
788 | * completion with FW error. | |
789 | */ | |
790 | if (cancel_delayed_work(&io_req->timeout_work)) | |
791 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
792 | ||
793 | rc = bnx2fc_initiate_abts(io_req); | |
794 | if (rc != SUCCESS) { | |
795 | printk(KERN_ERR PFX "err_warn: initiate_abts " | |
796 | "failed xid = 0x%x. issue cleanup\n", | |
797 | io_req->xid); | |
798 | bnx2fc_initiate_cleanup(io_req); | |
799 | } | |
800 | ret_err_rqe: | |
801 | bnx2fc_return_rqe(tgt, 1); | |
853e2bd2 BG |
802 | spin_unlock_bh(&tgt->tgt_lock); |
803 | break; | |
804 | ||
805 | case FCOE_WARNING_DETECTION_CQE_TYPE: | |
806 | /* | |
807 | * In case of warning reporting CQE a single RQ entry | |
808 | * is consumed. | |
809 | */ | |
68695973 | 810 | spin_lock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
811 | num_rq = 1; |
812 | err_entry = (struct fcoe_err_report_entry *) | |
813 | bnx2fc_get_next_rqe(tgt, 1); | |
814 | xid = cpu_to_be16(err_entry->fc_hdr.ox_id); | |
815 | BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); | |
816 | BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", | |
619c5cb6 VZ |
817 | err_entry->data.err_warn_bitmap_hi, |
818 | err_entry->data.err_warn_bitmap_lo); | |
853e2bd2 | 819 | BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", |
619c5cb6 | 820 | err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); |
853e2bd2 | 821 | |
0eb43b4b | 822 | if (xid > hba->max_xid) { |
7b594769 BPG |
823 | BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); |
824 | goto ret_warn_rqe; | |
825 | } | |
826 | ||
827 | err_warn_bit_map = (u64) | |
828 | ((u64)err_entry->data.err_warn_bitmap_hi << 32) | | |
829 | (u64)err_entry->data.err_warn_bitmap_lo; | |
830 | for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { | |
831 | if (err_warn_bit_map & (u64)((u64)1 << i)) { | |
832 | err_warn = i; | |
833 | break; | |
834 | } | |
835 | } | |
836 | BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn); | |
837 | ||
838 | task_idx = xid / BNX2FC_TASKS_PER_PAGE; | |
839 | index = xid % BNX2FC_TASKS_PER_PAGE; | |
840 | task_page = (struct fcoe_task_ctx_entry *) | |
841 | interface->hba->task_ctx[task_idx]; | |
842 | task = &(task_page[index]); | |
843 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; | |
844 | if (!io_req) | |
845 | goto ret_warn_rqe; | |
846 | ||
847 | if (io_req->cmd_type != BNX2FC_SCSI_CMD) { | |
848 | printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); | |
849 | goto ret_warn_rqe; | |
850 | } | |
851 | ||
7b594769 BPG |
852 | memcpy(&io_req->err_entry, err_entry, |
853 | sizeof(struct fcoe_err_report_entry)); | |
854 | ||
855 | if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION) | |
856 | /* REC_TOV is not a warning code */ | |
857 | BUG_ON(1); | |
858 | else | |
859 | BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n"); | |
860 | ret_warn_rqe: | |
853e2bd2 | 861 | bnx2fc_return_rqe(tgt, 1); |
68695973 | 862 | spin_unlock_bh(&tgt->tgt_lock); |
853e2bd2 BG |
863 | break; |
864 | ||
865 | default: | |
866 | printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n"); | |
867 | break; | |
868 | } | |
869 | } | |
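
The err_warn scan in both CQE branches above reduces the 64-bit `hi:lo` bitmap to the index of its lowest set bit, leaving `err_warn` at 0xff when nothing is set (note the shift must be done in 64 bits, as in the error path). A standalone equivalent of that loop, with illustrative names:

```c
/* Standalone equivalent of the err_warn_bit_map scan above. */
#include <assert.h>
#include <stdint.h>

static uint8_t lowest_err_bit(uint32_t hi, uint32_t lo)
{
	uint64_t map = ((uint64_t) hi << 32) | lo;
	uint8_t i;

	for (i = 0; i < 64; i++)
		if (map & ((uint64_t) 1 << i))
			return i;
	return 0xff;		/* nothing set: err_warn stays 0xff */
}

int main(void)
{
	assert(lowest_err_bit(0x0, 0x0) == 0xff);
	assert(lowest_err_bit(0x0, 0x8) == 3);
	assert(lowest_err_bit(0x1, 0x0) == 32);	/* needs the 64-bit shift */
	return 0;
}
```
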
870 | ||
871 | void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe) | |
872 | { | |
873 | struct fcoe_task_ctx_entry *task; | |
874 | struct fcoe_task_ctx_entry *task_page; | |
875 | struct fcoe_port *port = tgt->port; | |
aea71a02 BPG |
876 | struct bnx2fc_interface *interface = port->priv; |
877 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
878 | struct bnx2fc_cmd *io_req; |
879 | int task_idx, index; | |
880 | u16 xid; | |
881 | u8 cmd_type; | |
882 | u8 rx_state = 0; | |
883 | u8 num_rq; | |
884 | ||
885 | spin_lock_bh(&tgt->tgt_lock); | |
886 | xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; | |
0eb43b4b | 887 | if (xid >= hba->max_tasks) { |
b2a554ff | 888 | printk(KERN_ERR PFX "ERROR:xid out of range\n"); |
853e2bd2 BG |
889 | spin_unlock_bh(&tgt->tgt_lock); |
890 | return; | |
891 | } | |
892 | task_idx = xid / BNX2FC_TASKS_PER_PAGE; | |
893 | index = xid % BNX2FC_TASKS_PER_PAGE; | |
894 | task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; | |
895 | task = &(task_page[index]); | |
896 | ||
619c5cb6 VZ |
897 | num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & |
898 | FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> | |
899 | FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); | |
853e2bd2 BG |
900 | |
901 | io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; | |
902 | ||
903 | if (io_req == NULL) { | |
904 | printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n"); | |
905 | spin_unlock_bh(&tgt->tgt_lock); | |
906 | return; | |
907 | } | |
908 | ||
909 | /* Timestamp IO completion time */ | |
910 | cmd_type = io_req->cmd_type; | |
911 | ||
619c5cb6 VZ |
912 | rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & |
913 | FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >> | |
914 | FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT); | |
853e2bd2 | 915 | |
619c5cb6 VZ |
916 | /* Process other IO completion types */ |
917 | switch (cmd_type) { | |
918 | case BNX2FC_SCSI_CMD: | |
853e2bd2 BG |
919 | if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { |
920 | bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq); | |
921 | spin_unlock_bh(&tgt->tgt_lock); | |
922 | return; | |
923 | } | |
853e2bd2 | 924 | |
853e2bd2 BG |
925 | if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) |
926 | bnx2fc_process_abts_compl(io_req, task, num_rq); | |
927 | else if (rx_state == | |
928 | FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) | |
929 | bnx2fc_process_cleanup_compl(io_req, task, num_rq); | |
930 | else | |
931 | printk(KERN_ERR PFX "Invalid rx state - %d\n", | |
932 | rx_state); | |
933 | break; | |
934 | ||
935 | case BNX2FC_TASK_MGMT_CMD: | |
936 | BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); | |
937 | bnx2fc_process_tm_compl(io_req, task, num_rq); | |
938 | break; | |
939 | ||
940 | case BNX2FC_ABTS: | |
941 | /* | |
942 | * ABTS request received by firmware. ABTS response | |
943 | * will be delivered to the task belonging to the IO | |
944 | * that was aborted | |
945 | */ | |
946 | BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); | |
947 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
948 | break; | |
949 | ||
950 | case BNX2FC_ELS: | |
619c5cb6 VZ |
951 | if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) |
952 | bnx2fc_process_els_compl(io_req, task, num_rq); | |
953 | else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) | |
954 | bnx2fc_process_abts_compl(io_req, task, num_rq); | |
955 | else if (rx_state == | |
956 | FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) | |
957 | bnx2fc_process_cleanup_compl(io_req, task, num_rq); | |
958 | else | |
959 | printk(KERN_ERR PFX "Invalid rx state = %d\n", | |
960 | rx_state); | |
853e2bd2 BG |
961 | break; |
962 | ||
963 | case BNX2FC_CLEANUP: | |
964 | BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); | |
965 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
966 | break; | |
967 | ||
6c5a7ce4 BPG |
968 | case BNX2FC_SEQ_CLEANUP: |
969 | BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n", | |
970 | io_req->xid); | |
971 | bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); | |
972 | kref_put(&io_req->refcount, bnx2fc_cmd_release); | |
973 | break; | |
974 | ||
853e2bd2 BG |
975 | default: |
976 | printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); | |
977 | break; | |
978 | } | |
979 | spin_unlock_bh(&tgt->tgt_lock); | |
980 | } | |
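
The `xid` to task-context translation used here (and in the unsolicited path above) is a simple paged lookup: divide by the number of task entries per page to pick the page, take the remainder to index within it. A sketch with an assumed page size:

```c
/* Sketch of the xid -> (task page, entry) lookup. TASKS_PER_PAGE is
 * an illustrative stand-in for BNX2FC_TASKS_PER_PAGE. */
#include <assert.h>

#define TASKS_PER_PAGE 256	/* illustrative; the driver derives its own */

int main(void)
{
	unsigned int xid = 1000;
	unsigned int task_idx = xid / TASKS_PER_PAGE;	/* page 3   */
	unsigned int index    = xid % TASKS_PER_PAGE;	/* entry 232 */

	assert(task_idx == 3 && index == 232);
	return 0;
}
```
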
981 | ||
619c5cb6 VZ |
982 | void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) |
983 | { | |
984 | struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; | |
985 | u32 msg; | |
986 | ||
987 | wmb(); | |
988 | rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit << | |
989 | FCOE_CQE_TOGGLE_BIT_SHIFT); | |
990 | msg = *((u32 *)rx_db); | |
991 | writel(cpu_to_le32(msg), tgt->ctx_base); | |
992 | mmiowb(); | |
993 | ||
994 | } | |
995 | ||
853e2bd2 BG |
996 | struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) |
997 | { | |
998 | struct bnx2fc_work *work; | |
999 | work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); | |
1000 | if (!work) | |
1001 | return NULL; | |
1002 | ||
1003 | INIT_LIST_HEAD(&work->list); | |
1004 | work->tgt = tgt; | |
1005 | work->wqe = wqe; | |
1006 | return work; | |
1007 | } | |
1008 | ||
1009 | int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) | |
1010 | { | |
1011 | struct fcoe_cqe *cq; | |
1012 | u32 cq_cons; | |
1013 | struct fcoe_cqe *cqe; | |
619c5cb6 | 1014 | u32 num_free_sqes = 0; |
b338c785 | 1015 | u32 num_cqes = 0; |
853e2bd2 | 1016 | u16 wqe; |
853e2bd2 BG |
1017 | |
1018 | /* | |
1019 | * cq_lock is a low contention lock used to protect | |
1020 | * the CQ data structure from being freed up during | |
1021 | * the upload operation | |
1022 | */ | |
1023 | spin_lock_bh(&tgt->cq_lock); | |
1024 | ||
1025 | if (!tgt->cq) { | |
1026 | printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); | |
1027 | spin_unlock_bh(&tgt->cq_lock); | |
1028 | return 0; | |
1029 | } | |
1030 | cq = tgt->cq; | |
1031 | cq_cons = tgt->cq_cons_idx; | |
1032 | cqe = &cq[cq_cons]; | |
1033 | ||
619c5cb6 VZ |
1034 | while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == |
1035 | (tgt->cq_curr_toggle_bit << | |
1036 | FCOE_CQE_TOGGLE_BIT_SHIFT)) { | |
853e2bd2 | 1037 | |
619c5cb6 VZ |
1038 | /* new entry on the cq */ |
1039 | if (wqe & FCOE_CQE_CQE_TYPE) { | |
1040 | /* Unsolicited event notification */ | |
1041 | bnx2fc_process_unsol_compl(tgt, wqe); | |
1042 | } else { | |
1043 | /* Pending work request completion */ | |
1044 | struct bnx2fc_work *work = NULL; | |
1045 | struct bnx2fc_percpu_s *fps = NULL; | |
1046 | unsigned int cpu = wqe % num_possible_cpus(); | |
1047 | ||
1048 | fps = &per_cpu(bnx2fc_percpu, cpu); | |
1049 | spin_lock_bh(&fps->fp_work_lock); | |
1050 | if (unlikely(!fps->iothread)) | |
1051 | goto unlock; | |
1052 | ||
1053 | work = bnx2fc_alloc_work(tgt, wqe); | |
1054 | if (work) | |
1055 | list_add_tail(&work->list, | |
1056 | &fps->work_list); | |
853e2bd2 | 1057 | unlock: |
619c5cb6 | 1058 | spin_unlock_bh(&fps->fp_work_lock); |
853e2bd2 | 1059 | |
619c5cb6 VZ |
1060 | /* Pending work request completion */ |
1061 | if (fps->iothread && work) | |
1062 | wake_up_process(fps->iothread); | |
1063 | else | |
1064 | bnx2fc_process_cq_compl(tgt, wqe); | |
b338c785 | 1065 | num_free_sqes++; |
853e2bd2 | 1066 | } |
619c5cb6 VZ |
1067 | cqe++; |
1068 | tgt->cq_cons_idx++; | |
b338c785 | 1069 | num_cqes++; |
619c5cb6 VZ |
1070 | |
1071 | if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { | |
1072 | tgt->cq_cons_idx = 0; | |
1073 | cqe = cq; | |
1074 | tgt->cq_curr_toggle_bit = | |
1075 | 1 - tgt->cq_curr_toggle_bit; | |
853e2bd2 | 1076 | } |
619c5cb6 | 1077 | } |
b338c785 BPG |
1078 | if (num_cqes) { |
1079 | /* Arm CQ only if doorbell is mapped */ | |
1080 | if (tgt->ctx_base) | |
1081 | bnx2fc_arm_cq(tgt); | |
fd08bd62 BPG |
1082 | atomic_add(num_free_sqes, &tgt->free_sqes); |
1083 | } | |
853e2bd2 BG |
1084 | spin_unlock_bh(&tgt->cq_lock); |
1085 | return 0; | |
1086 | } | |
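
The while-loop above implements the usual toggle-bit ring protocol: a CQE counts as new only while its toggle bit matches the consumer's expectation, and the expectation flips each time the consumer wraps (the producer flips its bit the same way when it wraps). A self-contained model of the idea; the ring depth and bit position are illustrative:

```c
/* Toy model of the CQ toggle-bit protocol in bnx2fc_process_new_cqes().
 * Sizes and bit layout are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define RING_SZ		4		/* illustrative CQ depth    */
#define TOGGLE_BIT	0x8000u		/* like FCOE_CQE_TOGGLE_BIT */

int main(void)
{
	uint16_t cq[RING_SZ] = { 0 };	/* wqe words, toggle in bit 15 */
	uint16_t prod = 0, cons = 0, next_wqe = 1;
	unsigned int prod_toggle = 1, cons_toggle = 1;	/* both start at 1 */
	int i, round;

	for (round = 0; round < 2; round++) {
		/* producer posts a ring's worth, flipping its bit on wrap */
		for (i = 0; i < RING_SZ; i++) {
			cq[prod] = next_wqe++ | (prod_toggle ? TOGGLE_BIT : 0);
			if (++prod == RING_SZ) {
				prod = 0;
				prod_toggle ^= 1;
			}
		}
		/* consumer drains while the entry's toggle matches its own */
		while ((cq[cons] & TOGGLE_BIT) ==
		       (cons_toggle ? TOGGLE_BIT : 0)) {
			printf("consumed wqe %u\n", cq[cons] & ~TOGGLE_BIT);
			if (++cons == RING_SZ) {
				cons = 0;
				cons_toggle ^= 1;
			}
		}
	}
	return 0;
}
```
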
1087 | ||
1088 | /** | |
1089 | * bnx2fc_fastpath_notification - process global event queue (KCQ) | |
1090 | * | |
1091 | * @hba: adapter structure pointer | |
1092 | * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry | |
1093 | * | |
1094 | * Fast path event notification handler | |
1095 | */ | |
1096 | static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, | |
1097 | struct fcoe_kcqe *new_cqe_kcqe) | |
1098 | { | |
1099 | u32 conn_id = new_cqe_kcqe->fcoe_conn_id; | |
1100 | struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; | |
1101 | ||
1102 | if (!tgt) { | |
b2a554ff | 1103 | printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id); |
853e2bd2 BG |
1104 | return; |
1105 | } | |
1106 | ||
1107 | bnx2fc_process_new_cqes(tgt); | |
1108 | } | |
1109 | ||
1110 | /** | |
1111 | * bnx2fc_process_ofld_cmpl - process FCoE session offload completion | |
1112 | * | |
1113 | * @hba: adapter structure pointer | |
1114 | * @ofld_kcqe: connection offload kcqe pointer | |
1115 | * | |
1116 | * handle session offload completion, enable the session if offload is | |
1117 | * successful. | |
1118 | */ | |
1119 | static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, | |
1120 | struct fcoe_kcqe *ofld_kcqe) | |
1121 | { | |
1122 | struct bnx2fc_rport *tgt; | |
1123 | struct fcoe_port *port; | |
aea71a02 | 1124 | struct bnx2fc_interface *interface; |
853e2bd2 BG |
1125 | u32 conn_id; |
1126 | u32 context_id; | |
853e2bd2 BG |
1127 | |
1128 | conn_id = ofld_kcqe->fcoe_conn_id; | |
1129 | context_id = ofld_kcqe->fcoe_conn_context_id; | |
1130 | tgt = hba->tgt_ofld_list[conn_id]; | |
1131 | if (!tgt) { | |
aea71a02 | 1132 | printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); |
853e2bd2 BG |
1133 | return; |
1134 | } | |
1135 | BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", | |
1136 | ofld_kcqe->fcoe_conn_context_id); | |
1137 | port = tgt->port; | |
aea71a02 BPG |
1138 | interface = tgt->port->priv; |
1139 | if (hba != interface->hba) { | |
1140 | printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n"); | |
853e2bd2 BG |
1141 | goto ofld_cmpl_err; |
1142 | } | |
1143 | /* | |
1144 | * cnic has allocated a context_id for this session; use this | |
1145 | * while enabling the session. | |
1146 | */ | |
1147 | tgt->context_id = context_id; | |
1148 | if (ofld_kcqe->completion_status) { | |
1149 | if (ofld_kcqe->completion_status == | |
1150 | FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { | |
1151 | printk(KERN_ERR PFX "unable to allocate FCoE context " | |
1152 | "resources\n"); | |
1153 | set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); | |
1154 | } | |
853e2bd2 | 1155 | } else { |
e7f4fed5 BPG |
1156 | /* FW offload request successfully completed */ |
1157 | set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); | |
853e2bd2 | 1158 | } |
853e2bd2 BG |
1159 | ofld_cmpl_err: |
1160 | set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); | |
1161 | wake_up_interruptible(&tgt->ofld_wait); | |
1162 | } | |
1163 | ||
1164 | /** | |
1165 | * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion | |
1166 | * | |
1167 | * @hba: adapter structure pointer | |
1168 | * @ofld_kcqe: connection offload kcqe pointer | |
1169 | * | |
1170 | * handle session enable completion, mark the rport as ready | |
1171 | */ | |
1172 | ||
1173 | static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, | |
1174 | struct fcoe_kcqe *ofld_kcqe) | |
1175 | { | |
1176 | struct bnx2fc_rport *tgt; | |
aea71a02 | 1177 | struct bnx2fc_interface *interface; |
853e2bd2 BG |
1178 | u32 conn_id; |
1179 | u32 context_id; | |
1180 | ||
1181 | context_id = ofld_kcqe->fcoe_conn_context_id; | |
1182 | conn_id = ofld_kcqe->fcoe_conn_id; | |
1183 | tgt = hba->tgt_ofld_list[conn_id]; | |
1184 | if (!tgt) { | |
b2a554ff | 1185 | printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n"); |
853e2bd2 BG |
1186 | return; |
1187 | } | |
1188 | ||
1189 | BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", | |
1190 | ofld_kcqe->fcoe_conn_context_id); | |
1191 | ||
1192 | /* | |
1193 | * context_id should be the same for this target during offload | |
1194 | * and enable | |
1195 | */ | |
1196 | if (tgt->context_id != context_id) { | |
b2a554ff | 1197 | printk(KERN_ERR PFX "context id mis-match\n"); |
853e2bd2 BG |
1198 | return; |
1199 | } | |
aea71a02 BPG |
1200 | interface = tgt->port->priv; |
1201 | if (hba != interface->hba) { | |
1202 | printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); | |
853e2bd2 BG |
1203 | goto enbl_cmpl_err; |
1204 | } | |
e7f4fed5 | 1205 | if (!ofld_kcqe->completion_status) |
853e2bd2 | 1206 | /* enable successful - rport ready for issuing IOs */ |
e7f4fed5 | 1207 | set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); |
853e2bd2 BG |
1208 | |
1209 | enbl_cmpl_err: | |
1210 | set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); | |
1211 | wake_up_interruptible(&tgt->ofld_wait); | |
1212 | } | |
1213 | ||
1214 | static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, | |
1215 | struct fcoe_kcqe *disable_kcqe) | |
1216 | { | |
1217 | ||
1218 | struct bnx2fc_rport *tgt; | |
1219 | u32 conn_id; | |
1220 | ||
1221 | conn_id = disable_kcqe->fcoe_conn_id; | |
1222 | tgt = hba->tgt_ofld_list[conn_id]; | |
1223 | if (!tgt) { | |
b2a554ff | 1224 | printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n"); |
853e2bd2 BG |
1225 | return; |
1226 | } | |
1227 | ||
1228 | BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); | |
1229 | ||
1230 | if (disable_kcqe->completion_status) { | |
b2a554ff | 1231 | printk(KERN_ERR PFX "Disable failed with cmpl status %d\n", |
853e2bd2 | 1232 | disable_kcqe->completion_status); |
5c17ae21 BPG |
1233 | set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags); |
1234 | set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); | |
1235 | wake_up_interruptible(&tgt->upld_wait); | |
853e2bd2 BG |
1236 | } else { |
1237 | /* disable successful */ | |
1238 | BNX2FC_TGT_DBG(tgt, "disable successful\n"); | |
1239 | clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); | |
e7f4fed5 | 1240 | clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); |
853e2bd2 BG |
1241 | set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); |
1242 | set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); | |
1243 | wake_up_interruptible(&tgt->upld_wait); | |
1244 | } | |
1245 | } | |
1246 | ||
1247 | static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, | |
1248 | struct fcoe_kcqe *destroy_kcqe) | |
1249 | { | |
1250 | struct bnx2fc_rport *tgt; | |
1251 | u32 conn_id; | |
1252 | ||
1253 | conn_id = destroy_kcqe->fcoe_conn_id; | |
1254 | tgt = hba->tgt_ofld_list[conn_id]; | |
1255 | if (!tgt) { | |
b2a554ff | 1256 | printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n"); |
853e2bd2 BG |
1257 | return; |
1258 | } | |
1259 | ||
1260 | BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); | |
1261 | ||
1262 | if (destroy_kcqe->completion_status) { | |
b2a554ff | 1263 | printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n", |
853e2bd2 BG |
1264 | destroy_kcqe->completion_status); |
1265 | return; | |
1266 | } else { | |
1267 | /* destroy successful */ | |
1268 | BNX2FC_TGT_DBG(tgt, "upload successful\n"); | |
1269 | clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); | |
1270 | set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags); | |
1271 | set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); | |
1272 | wake_up_interruptible(&tgt->upld_wait); | |
1273 | } | |
1274 | } | |
1275 | ||
1276 | static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) | |
1277 | { | |
1278 | switch (err_code) { | |
1279 | case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE: | |
1280 | printk(KERN_ERR PFX "init_failure due to invalid opcode\n"); | |
1281 | break; | |
1282 | ||
1283 | case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE: | |
1284 | printk(KERN_ERR PFX "init failed due to ctx alloc failure\n"); | |
1285 | break; | |
1286 | ||
1287 | case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: | |
1288 | printk(KERN_ERR PFX "init_failure due to NIC error\n"); | |
1289 | break; | |
619c5cb6 VZ |
1290 | case FCOE_KCQE_COMPLETION_STATUS_ERROR: |
1291 | printk(KERN_ERR PFX "init failure due to compl status err\n"); | |
1292 | break; | |
1293 | case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: | |
1294 | printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); | |
b2a554ff | 1295 | break; |
853e2bd2 BG |
1296 | default: |
1297 | printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); | |
1298 | } | |
1299 | } | |
1300 | ||
1301 | /** | |
1302 | * bnx2fc_indicate_kcqe - process KCQE | |
1303 | * | |
1304 | * @context: adapter structure pointer (struct bnx2fc_hba) | |
1305 | * @kcq: kcqe pointer | |
1306 | * @num_cqe: Number of completion queue elements | |
1307 | * | |
1308 | * Generic KCQ event handler | |
1309 | */ | |
1310 | void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], | |
1311 | u32 num_cqe) | |
1312 | { | |
1313 | struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; | |
1314 | int i = 0; | |
1315 | struct fcoe_kcqe *kcqe = NULL; | |
1316 | ||
1317 | while (i < num_cqe) { | |
1318 | kcqe = (struct fcoe_kcqe *) kcq[i++]; | |
1319 | ||
1320 | switch (kcqe->op_code) { | |
1321 | case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION: | |
1322 | bnx2fc_fastpath_notification(hba, kcqe); | |
1323 | break; | |
1324 | ||
1325 | case FCOE_KCQE_OPCODE_OFFLOAD_CONN: | |
1326 | bnx2fc_process_ofld_cmpl(hba, kcqe); | |
1327 | break; | |
1328 | ||
1329 | case FCOE_KCQE_OPCODE_ENABLE_CONN: | |
1330 | bnx2fc_process_enable_conn_cmpl(hba, kcqe); | |
1331 | break; | |
1332 | ||
1333 | case FCOE_KCQE_OPCODE_INIT_FUNC: | |
1334 | if (kcqe->completion_status != | |
1335 | FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { | |
1336 | bnx2fc_init_failure(hba, | |
1337 | kcqe->completion_status); | |
1338 | } else { | |
1339 | set_bit(ADAPTER_STATE_UP, &hba->adapter_state); | |
1340 | bnx2fc_get_link_state(hba); | |
1341 | printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n", | |
1342 | (u8)hba->pcidev->bus->number); | |
1343 | } | |
1344 | break; | |
1345 | ||
1346 | case FCOE_KCQE_OPCODE_DESTROY_FUNC: | |
1347 | if (kcqe->completion_status != | |
1348 | FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { | |
1349 | ||
1350 | printk(KERN_ERR PFX "DESTROY failed\n"); | |
1351 | } else { | |
1352 | printk(KERN_ERR PFX "DESTROY success\n"); | |
1353 | } | |
aea71a02 | 1354 | set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); |
853e2bd2 BG |
1355 | wake_up_interruptible(&hba->destroy_wait); |
1356 | break; | |
1357 | ||
1358 | case FCOE_KCQE_OPCODE_DISABLE_CONN: | |
1359 | bnx2fc_process_conn_disable_cmpl(hba, kcqe); | |
1360 | break; | |
1361 | ||
1362 | case FCOE_KCQE_OPCODE_DESTROY_CONN: | |
1363 | bnx2fc_process_conn_destroy_cmpl(hba, kcqe); | |
1364 | break; | |
1365 | ||
1366 | case FCOE_KCQE_OPCODE_STAT_FUNC: | |
1367 | if (kcqe->completion_status != | |
1368 | FCOE_KCQE_COMPLETION_STATUS_SUCCESS) | |
1369 | printk(KERN_ERR PFX "STAT failed\n"); | |
1370 | complete(&hba->stat_req_done); | |
1371 | break; | |
1372 | ||
1373 | case FCOE_KCQE_OPCODE_FCOE_ERROR: | |
1374 | /* fall thru */ | |
1375 | default: | |
b2a554ff | 1376 | printk(KERN_ERR PFX "unknown opcode 0x%x\n", |
853e2bd2 BG |
1377 | kcqe->op_code); |
1378 | } | |
1379 | } | |
1380 | } | |
1381 | ||
1382 | void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) | |
1383 | { | |
1384 | struct fcoe_sqe *sqe; | |
1385 | ||
1386 | sqe = &tgt->sq[tgt->sq_prod_idx]; | |
1387 | ||
1388 | /* Fill SQ WQE */ | |
1389 | sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; | |
1390 | sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; | |
1391 | ||
1392 | /* Advance SQ Prod Idx */ | |
1393 | if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { | |
1394 | tgt->sq_prod_idx = 0; | |
1395 | tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; | |
1396 | } | |
1397 | } | |
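
The SQ WQE above is a 16-bit word carrying the task's xid in the low bits with the producer's toggle bit on top; `bnx2fc_ring_doorbell()` below pairs it with a producer index that also carries the toggle in bit 15. A small sketch of the packing, using assumed mask/shift values in place of the FCOE_SQE_* constants:

```c
/* Sketch of the SQ WQE packing. Mask and shift are assumed stand-ins
 * for FCOE_SQE_TASK_ID and FCOE_SQE_TOGGLE_BIT_SHIFT. */
#include <assert.h>
#include <stdint.h>

#define SQE_TASK_ID_MASK	0x7fffu
#define SQE_TOGGLE_BIT_SHIFT	15

int main(void)
{
	uint16_t xid = 0x123, toggle = 1;
	uint16_t wqe = (uint16_t)((xid & SQE_TASK_ID_MASK) |
				  (toggle << SQE_TOGGLE_BIT_SHIFT));

	assert((wqe & SQE_TASK_ID_MASK) == xid);	/* firmware recovers xid */
	assert((wqe >> SQE_TOGGLE_BIT_SHIFT) == toggle);/* and the toggle bit    */
	return 0;
}
```
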
1398 | ||
1399 | void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) | |
1400 | { | |
619c5cb6 | 1401 | struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; |
853e2bd2 BG |
1402 | u32 msg; |
1403 | ||
1404 | wmb(); | |
619c5cb6 | 1405 | sq_db->prod = tgt->sq_prod_idx | |
853e2bd2 | 1406 | (tgt->sq_curr_toggle_bit << 15); |
619c5cb6 | 1407 | msg = *((u32 *)sq_db); |
853e2bd2 | 1408 | writel(cpu_to_le32(msg), tgt->ctx_base); |
853e2bd2 BG |
1409 | mmiowb(); |
1410 | ||
1411 | } | |
1412 | ||
1413 | int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) | |
1414 | { | |
1415 | u32 context_id = tgt->context_id; | |
1416 | struct fcoe_port *port = tgt->port; | |
1417 | u32 reg_off; | |
1418 | resource_size_t reg_base; | |
aea71a02 BPG |
1419 | struct bnx2fc_interface *interface = port->priv; |
1420 | struct bnx2fc_hba *hba = interface->hba; | |
853e2bd2 BG |
1421 | |
1422 | reg_base = pci_resource_start(hba->pcidev, | |
1423 | BNX2X_DOORBELL_PCI_BAR); | |
f78afb35 | 1424 | reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); |
853e2bd2 BG |
1425 | tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); |
1426 | if (!tgt->ctx_base) | |
1427 | return -ENOMEM; | |
1428 | return 0; | |
1429 | } | |
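
The doorbell offset above is just the context ID (masked to 17 bits) scaled by the per-connection doorbell stride. A worked example; the stride value is an assumption for illustration, only the shape of the arithmetic comes from the driver:

```c
/* Worked example of the doorbell offset arithmetic in
 * bnx2fc_map_doorbell(). DB_SHIFT is assumed, not the driver's value. */
#include <stdint.h>
#include <stdio.h>

#define DB_SHIFT 3	/* assumed stride: each doorbell is 1 << 3 = 8 bytes */

int main(void)
{
	uint32_t context_id = 0x12345;
	uint32_t reg_off = (1 << DB_SHIFT) * (context_id & 0x1FFFF);

	printf("doorbell offset 0x%x\n", reg_off);	/* 0x91a28 */
	return 0;
}
```
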
1430 | ||
1431 | char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) | |
1432 | { | |
1433 | char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); | |
1434 | ||
1435 | if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) | |
1436 | return NULL; | |
1437 | ||
1438 | tgt->rq_cons_idx += num_items; | |
1439 | ||
1440 | if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) | |
1441 | tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; | |
1442 | ||
1443 | return buf; | |
1444 | } | |
1445 | ||
1446 | void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) | |
1447 | { | |
1448 | /* return the rq buffer */ | |
1449 | u32 next_prod_idx = tgt->rq_prod_idx + num_items; | |
1450 | if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { | |
1451 | /* Wrap around RQ */ | |
1452 | next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; | |
1453 | } | |
1454 | tgt->rq_prod_idx = next_prod_idx; | |
1455 | tgt->conn_db->rq_prod = tgt->rq_prod_idx; | |
1456 | } | |
1457 | ||
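/**
 * bnx2fc_init_seq_cleanup_task - setup a sequence cleanup task context
 *
 * @seq_clnp_req: the sequence cleanup request
 * @task: firmware task context entry to fill
 * @orig_io_req: the original I/O whose sequence is being cleaned up
 * @offset: relative offset into the original transfer to resume from
 *
 * Walks the original request's BD table to find the SGE that contains
 * @offset, then points the cleanup task's SGL at that entry with the
 * residual offset, so the firmware can retransmit (write) or re-receive
 * (read) from the right place.
 */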
6c5a7ce4 BPG |
1458 | void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req, |
1459 | struct fcoe_task_ctx_entry *task, | |
1460 | struct bnx2fc_cmd *orig_io_req, | |
1461 | u32 offset) | |
1462 | { | |
1463 | struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd; | |
1464 | struct bnx2fc_rport *tgt = seq_clnp_req->tgt; | |
1465 | struct bnx2fc_interface *interface = tgt->port->priv; | |
1466 | struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl; | |
1467 | struct fcoe_task_ctx_entry *orig_task; | |
1468 | struct fcoe_task_ctx_entry *task_page; | |
1469 | struct fcoe_ext_mul_sges_ctx *sgl; | |
1470 | u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP; | |
1471 | u8 orig_task_type; | |
1472 | u16 orig_xid = orig_io_req->xid; | |
1473 | u32 context_id = tgt->context_id; | |
1474 | u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma; | |
1475 | u32 orig_offset = offset; | |
1476 | int bd_count; | |
1477 | int orig_task_idx, index; | |
1478 | int i; | |
1479 | ||
1480 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); | |
1481 | ||
1482 | if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) | |
1483 | orig_task_type = FCOE_TASK_TYPE_WRITE; | |
1484 | else | |
1485 | orig_task_type = FCOE_TASK_TYPE_READ; | |
1486 | ||
1487 | /* Tx flags */ | |
1488 | task->txwr_rxrd.const_ctx.tx_flags = | |
1489 | FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP << | |
1490 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; | |
1491 | /* init flags */ | |
1492 | task->txwr_rxrd.const_ctx.init_flags = task_type << | |
1493 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; | |
1494 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << | |
1495 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; | |
1496 | task->rxwr_txrd.const_ctx.init_flags = context_id << | |
1497 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; | |
1500 | ||
1501 | task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; | |
1502 | ||
1503 | task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; | |
1504 | task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; | |
1505 | ||
1506 | bd_count = orig_io_req->bd_tbl->bd_valid; | |
1507 | ||
1508 | /* obtain the appropriate bd entry from relative offset */ | |
1509 | for (i = 0; i < bd_count; i++) { | |
1510 | if (offset < bd[i].buf_len) | |
1511 | break; | |
1512 | offset -= bd[i].buf_len; | |
1513 | } | |
1514 | phys_addr += (i * sizeof(struct fcoe_bd_ctx)); | |
1515 | ||
1516 | if (orig_task_type == FCOE_TASK_TYPE_WRITE) { | |
1517 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = | |
1518 | (u32)phys_addr; | |
1519 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = | |
1520 | (u32)((u64)phys_addr >> 32); | |
1521 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = | |
1522 | bd_count; | |
1523 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = | |
1524 | offset; /* adjusted offset */ | |
1525 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; | |
1526 | } else { | |
1527 | orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE; | |
1528 | index = orig_xid % BNX2FC_TASKS_PER_PAGE; | |
1529 | ||
1530 | task_page = (struct fcoe_task_ctx_entry *) | |
1531 | interface->hba->task_ctx[orig_task_idx]; | |
1532 | orig_task = &(task_page[index]); | |
1533 | ||
1534 | /* Multiple SGEs were used for this IO */ | |
1535 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; | |
1536 | sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr; | |
1537 | sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); | |
1538 | sgl->mul_sgl.sgl_size = bd_count; | |
1539 | sgl->mul_sgl.cur_sge_off = offset; /* adjusted offset */ | |
1540 | sgl->mul_sgl.cur_sge_idx = i; | |
1541 | ||
1542 | memset(&task->rxwr_only.rx_seq_ctx, 0, | |
1543 | sizeof(struct fcoe_rx_seq_ctx)); | |
1544 | task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; | |
1545 | task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; | |
1546 | } | |
1547 | } | |
853e2bd2 BG |
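/**
 * bnx2fc_init_cleanup_task - setup an exchange cleanup task context
 *
 * @io_req: the cleanup command
 * @task: firmware task context entry to fill
 * @orig_xid: exchange id of the task being cleaned up
 *
 * Builds a minimal EXCHANGE_CLEANUP task that tells the firmware to
 * release the state it holds for @orig_xid on this connection.
 */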
1548 | void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, |
1549 | struct fcoe_task_ctx_entry *task, | |
1550 | u16 orig_xid) | |
1551 | { | |
1552 | u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; | |
1553 | struct bnx2fc_rport *tgt = io_req->tgt; | |
1554 | u32 context_id = tgt->context_id; | |
1555 | ||
1556 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); | |
1557 | ||
1558 | /* Tx Write Rx Read */ | |
619c5cb6 VZ |
1559 | /* init flags */ |
1560 | task->txwr_rxrd.const_ctx.init_flags = task_type << | |
1561 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; | |
1562 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << | |
1563 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; | |
f3820b71 BPG |
1564 | if (tgt->dev_type == TYPE_TAPE) |
1565 | task->txwr_rxrd.const_ctx.init_flags |= | |
1566 | FCOE_TASK_DEV_TYPE_TAPE << | |
1567 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; | |
1568 | else | |
1569 | task->txwr_rxrd.const_ctx.init_flags |= | |
619c5cb6 VZ |
1570 | FCOE_TASK_DEV_TYPE_DISK << |
1571 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; | |
1572 | task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; | |
1573 | ||
1574 | /* Tx flags */ | |
1575 | task->txwr_rxrd.const_ctx.tx_flags = | |
1576 | FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << | |
1577 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; | |
1578 | ||
1579 | /* Rx Read Tx Write */ | |
1580 | task->rxwr_txrd.const_ctx.init_flags = context_id << | |
1581 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; | |
1582 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << | |
1583 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; | |
853e2bd2 BG |
1584 | } |
1585 | ||
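/**
 * bnx2fc_init_mp_task - setup a middle path (ELS/TM/ABTS) task context
 *
 * @io_req: the middle path command
 * @task: firmware task context entry to fill
 *
 * Middle path frames carry their FC header and payload through
 * driver-allocated request/response buffers rather than the SCSI SGL:
 * the request BD is wired into the Tx-only section, the FC header is
 * byte-swapped into the task itself, and for MIDPATH tasks the response
 * BD is wired into the Rx-only section.
 */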
1586 | void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, | |
1587 | struct fcoe_task_ctx_entry *task) | |
1588 | { | |
1589 | struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); | |
1590 | struct bnx2fc_rport *tgt = io_req->tgt; | |
1591 | struct fc_frame_header *fc_hdr; | |
619c5cb6 | 1592 | struct fcoe_ext_mul_sges_ctx *sgl; |
853e2bd2 BG |
1593 | u8 task_type = 0; |
1594 | u64 *hdr; | |
1595 | u64 temp_hdr[3]; | |
1596 | u32 context_id; | |
1597 | ||
1598 | ||
1599 | /* Obtain task_type */ | |
1600 | if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || | |
1601 | (io_req->cmd_type == BNX2FC_ELS)) { | |
1602 | task_type = FCOE_TASK_TYPE_MIDPATH; | |
1603 | } else if (io_req->cmd_type == BNX2FC_ABTS) { | |
1604 | task_type = FCOE_TASK_TYPE_ABTS; | |
1605 | } | |
1606 | ||
1607 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); | |
1608 | ||
1609 | /* Setup the task from io_req for easy reference */ | |
1610 | io_req->task = task; | |
1611 | ||
1612 | BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", | |
1613 | io_req->cmd_type, task_type); | |
1614 | ||
1615 | /* Tx only */ | |
1616 | if ((task_type == FCOE_TASK_TYPE_MIDPATH) || | |
1617 | (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { | |
619c5cb6 | 1618 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = |
853e2bd2 | 1619 | (u32)mp_req->mp_req_bd_dma; |
619c5cb6 | 1620 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = |
853e2bd2 | 1621 | (u32)((u64)mp_req->mp_req_bd_dma >> 32); |
619c5cb6 | 1622 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; |
853e2bd2 BG |
1623 | } |
1624 | ||
1625 | /* Tx Write Rx Read */ | |
619c5cb6 VZ |
1626 | /* init flags */ |
1627 | task->txwr_rxrd.const_ctx.init_flags = task_type << | |
1628 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; | |
f3820b71 BPG |
1629 | if (tgt->dev_type == TYPE_TAPE) |
1630 | task->txwr_rxrd.const_ctx.init_flags |= | |
1631 | FCOE_TASK_DEV_TYPE_TAPE << | |
1632 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; | |
1633 | else | |
1634 | task->txwr_rxrd.const_ctx.init_flags |= | |
619c5cb6 VZ |
1635 | FCOE_TASK_DEV_TYPE_DISK << |
1636 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; | |
1637 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << | |
1638 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; | |
1639 | ||
1640 | /* tx flags */ | |
1641 | task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT << | |
1642 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; | |
853e2bd2 BG |
1643 | |
1644 | /* Rx Write Tx Read */ | |
619c5cb6 VZ |
1645 | task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; |
1646 | ||
1647 | /* rx flags */ | |
1648 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << | |
1649 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; | |
1650 | ||
1651 | context_id = tgt->context_id; | |
1652 | task->rxwr_txrd.const_ctx.init_flags = context_id << | |
1653 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; | |
1654 | ||
853e2bd2 BG |
1655 | fc_hdr = &(mp_req->req_fc_hdr); |
1656 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { | |
1657 | fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); | |
1658 | fc_hdr->fh_rx_id = htons(0xffff); | |
619c5cb6 | 1659 | task->rxwr_txrd.var_ctx.rx_id = 0xffff; |
853e2bd2 BG |
1660 | } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { |
1661 | fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); | |
1662 | } | |
1663 | ||
1664 | /* Fill FC Header into middle path buffer */ | |
619c5cb6 | 1665 | hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; |
853e2bd2 BG |
1666 | memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); |
1667 | hdr[0] = cpu_to_be64(temp_hdr[0]); | |
1668 | hdr[1] = cpu_to_be64(temp_hdr[1]); | |
1669 | hdr[2] = cpu_to_be64(temp_hdr[2]); | |
1670 | ||
1671 | /* Rx Only */ | |
1672 | if (task_type == FCOE_TASK_TYPE_MIDPATH) { | |
619c5cb6 | 1673 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; |
853e2bd2 | 1674 | |
619c5cb6 VZ |
1675 | sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; |
1676 | sgl->mul_sgl.cur_sge_addr.hi = | |
853e2bd2 | 1677 | (u32)((u64)mp_req->mp_resp_bd_dma >> 32); |
619c5cb6 | 1678 | sgl->mul_sgl.sgl_size = 1; |
853e2bd2 BG |
1679 | } |
1680 | } | |
1681 | ||
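/**
 * bnx2fc_init_task - setup a SCSI read/write task context
 *
 * @io_req: the SCSI command
 * @task: firmware task context entry to fill
 *
 * Builds the FCP_CMND IU (byte-swapped to big endian) and attaches the
 * data SGL. As an optimization for disk devices, transfers described by
 * one BD (writes and reads) or two BDs (reads) use cached SGEs placed
 * directly in the task context, avoiding an extra DMA fetch of the SGL;
 * everything else falls back to the multi-SGE table.
 */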
1682 | void bnx2fc_init_task(struct bnx2fc_cmd *io_req, | |
1683 | struct fcoe_task_ctx_entry *task) | |
1684 | { | |
1685 | u8 task_type; | |
1686 | struct scsi_cmnd *sc_cmd = io_req->sc_cmd; | |
1687 | struct io_bdt *bd_tbl = io_req->bd_tbl; | |
1688 | struct bnx2fc_rport *tgt = io_req->tgt; | |
619c5cb6 VZ |
1689 | struct fcoe_cached_sge_ctx *cached_sge; |
1690 | struct fcoe_ext_mul_sges_ctx *sgl; | |
f3820b71 | 1691 | int dev_type = tgt->dev_type; |
853e2bd2 BG |
1692 | u64 *fcp_cmnd; |
1693 | u64 tmp_fcp_cmnd[4]; | |
1694 | u32 context_id; | |
1695 | int cnt, i; | |
1696 | int bd_count; | |
1697 | ||
1698 | memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); | |
1699 | ||
1700 | /* Setup the task from io_req for easy reference */ | |
1701 | io_req->task = task; | |
1702 | ||
1703 | if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) | |
1704 | task_type = FCOE_TASK_TYPE_WRITE; | |
1705 | else | |
1706 | task_type = FCOE_TASK_TYPE_READ; | |
1707 | ||
1708 | /* Tx only */ | |
3c75108f | 1709 | bd_count = bd_tbl->bd_valid; |
1101a0d8 | 1710 | cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; |
853e2bd2 | 1711 | if (task_type == FCOE_TASK_TYPE_WRITE) { |
3c75108f BPG |
1712 | if ((dev_type == TYPE_DISK) && (bd_count == 1)) { |
1713 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; | |
1714 | ||
1715 | task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = | |
1101a0d8 | 1716 | cached_sge->cur_buf_addr.lo = |
3c75108f BPG |
1717 | fcoe_bd_tbl->buf_addr_lo; |
1718 | task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = | |
1101a0d8 | 1719 | cached_sge->cur_buf_addr.hi = |
3c75108f BPG |
1720 | fcoe_bd_tbl->buf_addr_hi; |
1721 | task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = | |
1101a0d8 | 1722 | cached_sge->cur_buf_rem = |
3c75108f BPG |
1723 | fcoe_bd_tbl->buf_len; |
1724 | ||
1725 | task->txwr_rxrd.const_ctx.init_flags |= 1 << | |
1726 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; | |
1727 | } else { | |
1728 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = | |
1729 | (u32)bd_tbl->bd_tbl_dma; | |
1730 | task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = | |
1731 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); | |
1732 | task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = | |
1733 | bd_tbl->bd_valid; | |
1734 | } | |
853e2bd2 BG |
1735 | } |
1736 | ||
1737 | /*Tx Write Rx Read */ | |
1738 | /* Init state to NORMAL */ | |
3c75108f | 1739 | task->txwr_rxrd.const_ctx.init_flags |= task_type << |
619c5cb6 | 1740 | FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; |
c1c16bd5 | 1741 | if (dev_type == TYPE_TAPE) { |
f3820b71 BPG |
1742 | task->txwr_rxrd.const_ctx.init_flags |= |
1743 | FCOE_TASK_DEV_TYPE_TAPE << | |
1744 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; | |
c1c16bd5 BPG |
1745 | io_req->rec_retry = 0; |
1747 | } else | |
f3820b71 | 1748 | task->txwr_rxrd.const_ctx.init_flags |= |
619c5cb6 VZ |
1749 | FCOE_TASK_DEV_TYPE_DISK << |
1750 | FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; | |
1751 | task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << | |
1752 | FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; | |
1753 | /* tx flags */ | |
1754 | task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << | |
1755 | FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; | |
853e2bd2 BG |
1756 | |
1757 | /* Set initial seq counter */ | |
619c5cb6 | 1758 | task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; |
853e2bd2 BG |
1759 | |
1760 | /* Fill FCP_CMND IU */ | |
1761 | fcp_cmnd = (u64 *) | |
619c5cb6 | 1762 | task->txwr_rxrd.union_ctx.fcp_cmd.opaque; |
853e2bd2 BG |
1763 | bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); |
1764 | ||
1765 | /* swap fcp_cmnd */ | |
1766 | cnt = sizeof(struct fcp_cmnd) / sizeof(u64); | |
1767 | ||
1768 | for (i = 0; i < cnt; i++) { | |
1769 | *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); | |
1770 | fcp_cmnd++; | |
1771 | } | |
1772 | ||
1773 | /* Rx Write Tx Read */ | |
619c5cb6 VZ |
1774 | task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; |
1775 | ||
1776 | context_id = tgt->context_id; | |
1777 | task->rxwr_txrd.const_ctx.init_flags = context_id << | |
1778 | FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; | |
1779 | ||
1780 | /* rx flags */ | |
1781 | /* Set state to "waiting for the first packet" */ | |
1782 | task->rxwr_txrd.var_ctx.rx_flags |= 1 << | |
1783 | FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; | |
1784 | ||
1785 | task->rxwr_txrd.var_ctx.rx_id = 0xffff; | |
853e2bd2 BG |
1786 | |
1787 | /* Rx Only */ | |
1101a0d8 BPG |
1788 | if (task_type != FCOE_TASK_TYPE_READ) |
1789 | return; | |
1790 | ||
619c5cb6 VZ |
1791 | sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; |
1792 | bd_count = bd_tbl->bd_valid; | |
1101a0d8 BPG |
1793 | |
1794 | if (dev_type == TYPE_DISK) { | |
853e2bd2 BG |
1795 | if (bd_count == 1) { |
1796 | ||
1797 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; | |
1798 | ||
619c5cb6 VZ |
1799 | cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; |
1800 | cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; | |
1801 | cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; | |
1802 | task->txwr_rxrd.const_ctx.init_flags |= 1 << | |
1803 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; | |
1804 | } else if (bd_count == 2) { | |
1805 | struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; | |
1806 | ||
1807 | cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; | |
1808 | cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; | |
1809 | cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; | |
1810 | ||
1811 | fcoe_bd_tbl++; | |
1812 | cached_sge->second_buf_addr.lo = | |
1813 | fcoe_bd_tbl->buf_addr_lo; | |
1814 | cached_sge->second_buf_addr.hi = | |
1815 | fcoe_bd_tbl->buf_addr_hi; | |
1816 | cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len; | |
1817 | task->txwr_rxrd.const_ctx.init_flags |= 1 << | |
1818 | FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; | |
853e2bd2 BG |
1819 | } else { |
1820 | ||
619c5cb6 VZ |
1821 | sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; |
1822 | sgl->mul_sgl.cur_sge_addr.hi = | |
853e2bd2 | 1823 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); |
619c5cb6 | 1824 | sgl->mul_sgl.sgl_size = bd_count; |
853e2bd2 | 1825 | } |
f3820b71 BPG |
1826 | } else { |
1827 | sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; | |
1828 | sgl->mul_sgl.cur_sge_addr.hi = | |
1829 | (u32)((u64)bd_tbl->bd_tbl_dma >> 32); | |
1830 | sgl->mul_sgl.sgl_size = bd_count; | |
853e2bd2 BG |
1831 | } |
1832 | } | |
1833 | ||
1834 | /** | |
1835 | * bnx2fc_setup_task_ctx - allocate and map task context | |
1836 | * | |
1837 | * @hba: pointer to adapter structure | |
1838 | * | |
1839 | * Allocate memory for the per-task context pages and the BD table | |
1840 | * that the firmware uses to locate them. | |
1841 | * | |
1842 | */ | |
1843 | int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) | |
1844 | { | |
1845 | int rc = 0; | |
1846 | struct regpair *task_ctx_bdt; | |
1847 | dma_addr_t addr; | |
0eb43b4b | 1848 | int task_ctx_arr_sz; |
853e2bd2 BG |
1849 | int i; |
1850 | ||
1851 | /* | |
1852 | * Allocate the task context BD table. One page of BD table entries | |
1853 | * can map 256 task context pages, and each of those pages holds 32 | |
1854 | * task context entries, so a single BD table page covers up to 8192 | |
1855 | * task context entries. | |
1856 | */ | |
1857 | hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, | |
1858 | PAGE_SIZE, | |
1859 | &hba->task_ctx_bd_dma, | |
1860 | GFP_KERNEL); | |
1861 | if (!hba->task_ctx_bd_tbl) { | |
1862 | printk(KERN_ERR PFX "unable to allocate task context BDT\n"); | |
1863 | rc = -1; | |
1864 | goto out; | |
1865 | } | |
1866 | memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE); | |
1867 | ||
1868 | /* | |
1869 | * Allocate task_ctx which is an array of pointers pointing to | |
1870 | * a page containing 32 task contexts | |
1871 | */ | |
0eb43b4b BPG |
1872 | task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); |
1873 | hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)), | |
853e2bd2 BG |
1874 | GFP_KERNEL); |
1875 | if (!hba->task_ctx) { | |
1876 | printk(KERN_ERR PFX "unable to allocate task context array\n"); | |
1877 | rc = -1; | |
1878 | goto out1; | |
1879 | } | |
1880 | ||
1881 | /* | |
1882 | * Allocate task_ctx_dma which is an array of dma addresses | |
1883 | */ | |
0eb43b4b | 1884 | hba->task_ctx_dma = kmalloc((task_ctx_arr_sz * |
853e2bd2 BG |
1885 | sizeof(dma_addr_t)), GFP_KERNEL); |
1886 | if (!hba->task_ctx_dma) { | |
1887 | printk(KERN_ERR PFX "unable to alloc context mapping array\n"); | |
1888 | rc = -1; | |
1889 | goto out2; | |
1890 | } | |
1891 | ||
1892 | task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; | |
0eb43b4b | 1893 | for (i = 0; i < task_ctx_arr_sz; i++) { |
853e2bd2 BG |
1894 | |
1895 | hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, | |
1896 | PAGE_SIZE, | |
1897 | &hba->task_ctx_dma[i], | |
1898 | GFP_KERNEL); | |
1899 | if (!hba->task_ctx[i]) { | |
1900 | printk(KERN_ERR PFX "unable to alloc task context\n"); | |
1901 | rc = -1; | |
1902 | goto out3; | |
1903 | } | |
1904 | memset(hba->task_ctx[i], 0, PAGE_SIZE); | |
1905 | addr = (u64)hba->task_ctx_dma[i]; | |
1906 | task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); | |
1907 | task_ctx_bdt->lo = cpu_to_le32((u32)addr); | |
1908 | task_ctx_bdt++; | |
1909 | } | |
1910 | return 0; | |
1911 | ||
1912 | out3: | |
0eb43b4b | 1913 | for (i = 0; i < task_ctx_arr_sz; i++) { |
853e2bd2 BG |
1914 | if (hba->task_ctx[i]) { |
1915 | ||
1916 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | |
1917 | hba->task_ctx[i], hba->task_ctx_dma[i]); | |
1918 | hba->task_ctx[i] = NULL; | |
1919 | } | |
1920 | } | |
1921 | ||
1922 | kfree(hba->task_ctx_dma); | |
1923 | hba->task_ctx_dma = NULL; | |
1924 | out2: | |
1925 | kfree(hba->task_ctx); | |
1926 | hba->task_ctx = NULL; | |
1927 | out1: | |
1928 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | |
1929 | hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); | |
1930 | hba->task_ctx_bd_tbl = NULL; | |
1931 | out: | |
1932 | return rc; | |
1933 | } | |
1934 | ||
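/**
 * bnx2fc_free_task_ctx - free task context resources
 *
 * @hba: pointer to adapter structure
 *
 * Releases the task context pages, the BD table page that maps them,
 * and the bookkeeping arrays allocated by bnx2fc_setup_task_ctx().
 */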
1935 | void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) | |
1936 | { | |
0eb43b4b | 1937 | int task_ctx_arr_sz; |
853e2bd2 BG |
1938 | int i; |
1939 | ||
1940 | if (hba->task_ctx_bd_tbl) { | |
1941 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | |
1942 | hba->task_ctx_bd_tbl, | |
1943 | hba->task_ctx_bd_dma); | |
1944 | hba->task_ctx_bd_tbl = NULL; | |
1945 | } | |
1946 | ||
0eb43b4b | 1947 | task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); |
853e2bd2 | 1948 | if (hba->task_ctx) { |
0eb43b4b | 1949 | for (i = 0; i < task_ctx_arr_sz; i++) { |
853e2bd2 BG |
1950 | if (hba->task_ctx[i]) { |
1951 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | |
1952 | hba->task_ctx[i], | |
1953 | hba->task_ctx_dma[i]); | |
1954 | hba->task_ctx[i] = NULL; | |
1955 | } | |
1956 | } | |
1957 | kfree(hba->task_ctx); | |
1958 | hba->task_ctx = NULL; | |
1959 | } | |
1960 | ||
1961 | kfree(hba->task_ctx_dma); | |
1962 | hba->task_ctx_dma = NULL; | |
1963 | } | |
1964 | ||
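/**
 * bnx2fc_free_hash_table - free the session hash table segments and PBL
 *
 * @hba: pointer to adapter structure
 *
 * Reads each segment's DMA address back out of the page buffer list
 * (low 32 bits first, then high) in order to free the segment, then
 * frees the PBL page itself.
 */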
1965 | static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) | |
1966 | { | |
1967 | int i; | |
1968 | int segment_count; | |
1969 | int hash_table_size; | |
1970 | u32 *pbl; | |
1971 | ||
1972 | segment_count = hba->hash_tbl_segment_count; | |
1973 | hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * | |
1974 | sizeof(struct fcoe_hash_table_entry); | |
1975 | ||
1976 | pbl = hba->hash_tbl_pbl; | |
1977 | for (i = 0; i < segment_count; ++i) { | |
1978 | dma_addr_t dma_address; | |
1979 | ||
1980 | dma_address = le32_to_cpu(*pbl); | |
1981 | ++pbl; | |
1982 | dma_address += ((u64)le32_to_cpu(*pbl)) << 32; | |
1983 | ++pbl; | |
1984 | dma_free_coherent(&hba->pcidev->dev, | |
1985 | BNX2FC_HASH_TBL_CHUNK_SIZE, | |
1986 | hba->hash_tbl_segments[i], | |
1987 | dma_address); | |
1988 | ||
1989 | } | |
1990 | ||
1991 | if (hba->hash_tbl_pbl) { | |
1992 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | |
1993 | hba->hash_tbl_pbl, | |
1994 | hba->hash_tbl_pbl_dma); | |
1995 | hba->hash_tbl_pbl = NULL; | |
1996 | } | |
1997 | } | |
1998 | ||
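/**
 * bnx2fc_allocate_hash_table - allocate the session hash table
 *
 * @hba: pointer to adapter structure
 *
 * The hash table is carved into BNX2FC_HASH_TBL_CHUNK_SIZE DMA
 * segments, rounding the segment count up so the whole table fits:
 *
 *	segment_count =
 *		DIV_ROUND_UP(hash_table_size, BNX2FC_HASH_TBL_CHUNK_SIZE);
 *
 * (equivalent to the open-coded arithmetic below). A page buffer list
 * (PBL) of lo/hi 32-bit address pairs is then built so the firmware
 * can locate every segment.
 */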
1999 | static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) | |
2000 | { | |
2001 | int i; | |
2002 | int hash_table_size; | |
2003 | int segment_count; | |
2004 | int segment_array_size; | |
2005 | int dma_segment_array_size; | |
2006 | dma_addr_t *dma_segment_array; | |
2007 | u32 *pbl; | |
2008 | ||
2009 | hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * | |
2010 | sizeof(struct fcoe_hash_table_entry); | |
2011 | ||
2012 | segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; | |
2013 | segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; | |
2014 | hba->hash_tbl_segment_count = segment_count; | |
2015 | ||
2016 | segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); | |
2017 | hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); | |
2018 | if (!hba->hash_tbl_segments) { | |
2019 | printk(KERN_ERR PFX "hash table pointers alloc failed\n"); | |
2020 | return -ENOMEM; | |
2021 | } | |
2022 | dma_segment_array_size = segment_count * sizeof(*dma_segment_array); | |
2023 | dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); | |
2024 | if (!dma_segment_array) { | |
2025 | printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); | |
kfree(hba->hash_tbl_segments);
hba->hash_tbl_segments = NULL;
2026 | return -ENOMEM; | |
2027 | } | |
2028 | ||
2029 | for (i = 0; i < segment_count; ++i) { | |
2030 | hba->hash_tbl_segments[i] = | |
2031 | dma_alloc_coherent(&hba->pcidev->dev, | |
2032 | BNX2FC_HASH_TBL_CHUNK_SIZE, | |
2033 | &dma_segment_array[i], | |
2034 | GFP_KERNEL); | |
2035 | if (!hba->hash_tbl_segments[i]) { | |
2036 | printk(KERN_ERR PFX "hash segment alloc failed\n"); | |
2037 | while (--i >= 0) { | |
2038 | dma_free_coherent(&hba->pcidev->dev, | |
2039 | BNX2FC_HASH_TBL_CHUNK_SIZE, | |
2040 | hba->hash_tbl_segments[i], | |
2041 | dma_segment_array[i]); | |
2042 | hba->hash_tbl_segments[i] = NULL; | |
2043 | } | |
2044 | kfree(dma_segment_array); | |
2045 | return -ENOMEM; | |
2046 | } | |
2047 | memset(hba->hash_tbl_segments[i], 0, | |
2048 | BNX2FC_HASH_TBL_CHUNK_SIZE); | |
2049 | } | |
2050 | ||
2051 | hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, | |
2052 | PAGE_SIZE, | |
2053 | &hba->hash_tbl_pbl_dma, | |
2054 | GFP_KERNEL); | |
2055 | if (!hba->hash_tbl_pbl) { | |
2056 | printk(KERN_ERR PFX "hash table pbl alloc failed\n"); | |
for (i = 0; i < segment_count; ++i)
dma_free_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE,
hba->hash_tbl_segments[i], dma_segment_array[i]);
kfree(hba->hash_tbl_segments);
hba->hash_tbl_segments = NULL;
2057 | kfree(dma_segment_array); | |
2058 | return -ENOMEM; | |
2059 | } | |
2060 | memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); | |
2061 | ||
2062 | pbl = hba->hash_tbl_pbl; | |
2063 | for (i = 0; i < segment_count; ++i) { | |
2064 | u64 paddr = dma_segment_array[i]; | |
2065 | *pbl = cpu_to_le32((u32) paddr); | |
2066 | ++pbl; | |
2067 | *pbl = cpu_to_le32((u32) (paddr >> 32)); | |
2068 | ++pbl; | |
2069 | } | |
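/*
 * Debug walk: re-read the PBL and step over the lo/hi address pairs
 * that were just written; the values themselves are not used.
 */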
2070 | pbl = hba->hash_tbl_pbl; | |
2071 | i = 0; | |
2072 | while (*pbl && *(pbl + 1)) { | |
2073 | u32 lo; | |
2074 | u32 hi; | |
2075 | lo = *pbl; | |
2076 | ++pbl; | |
2077 | hi = *pbl; | |
2078 | ++pbl; | |
2079 | ++i; | |
2080 | } | |
2081 | kfree(dma_segment_array); | |
2082 | return 0; | |
2083 | } | |
2084 | ||
2085 | /** | |
2086 | * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer | |
2087 | * | |
2088 | * @hba: Pointer to adapter structure | |
2089 | * | |
2090 | */ | |
2091 | int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) | |
2092 | { | |
2093 | u64 addr; | |
2094 | u32 mem_size; | |
2095 | int i; | |
2096 | ||
2097 | if (bnx2fc_allocate_hash_table(hba)) | |
2098 | return -ENOMEM; | |
2099 | ||
2100 | mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); | |
2101 | hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, | |
2102 | &hba->t2_hash_tbl_ptr_dma, | |
2103 | GFP_KERNEL); | |
2104 | if (!hba->t2_hash_tbl_ptr) { | |
2105 | printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); | |
2106 | bnx2fc_free_fw_resc(hba); | |
2107 | return -ENOMEM; | |
2108 | } | |
2109 | memset(hba->t2_hash_tbl_ptr, 0x00, mem_size); | |
2110 | ||
2111 | mem_size = BNX2FC_NUM_MAX_SESS * | |
2112 | sizeof(struct fcoe_t2_hash_table_entry); | |
2113 | hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, | |
2114 | &hba->t2_hash_tbl_dma, | |
2115 | GFP_KERNEL); | |
2116 | if (!hba->t2_hash_tbl) { | |
2117 | printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); | |
2118 | bnx2fc_free_fw_resc(hba); | |
2119 | return -ENOMEM; | |
2120 | } | |
2121 | memset(hba->t2_hash_tbl, 0x00, mem_size); | |
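/* Chain each T2 hash entry's next pointer to the following entry. */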
2122 | for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { | |
2123 | addr = (u64) hba->t2_hash_tbl_dma + | |
2124 | ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); | |
2125 | hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; | |
2126 | hba->t2_hash_tbl[i].next.hi = addr >> 32; | |
2127 | } | |
2128 | ||
2129 | hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, | |
2130 | PAGE_SIZE, &hba->dummy_buf_dma, | |
2131 | GFP_KERNEL); | |
2132 | if (!hba->dummy_buffer) { | |
2133 | printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); | |
2134 | bnx2fc_free_fw_resc(hba); | |
2135 | return -ENOMEM; | |
2136 | } | |
2137 | ||
2138 | hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, | |
2139 | PAGE_SIZE, | |
2140 | &hba->stats_buf_dma, | |
2141 | GFP_KERNEL); | |
2142 | if (!hba->stats_buffer) { | |
2143 | printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); | |
2144 | bnx2fc_free_fw_resc(hba); | |
2145 | return -ENOMEM; | |
2146 | } | |
2147 | memset(hba->stats_buffer, 0x00, PAGE_SIZE); | |
2148 | ||
2149 | return 0; | |
2150 | } | |
2151 | ||
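/**
 * bnx2fc_free_fw_resc - free firmware resources from bnx2fc_setup_fw_resc
 *
 * @hba: pointer to adapter structure
 *
 * Safe to call on a partially initialized adapter: each buffer is freed
 * only if its pointer is non-NULL, which is why the setup path can use
 * this function for error unwinding.
 */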
2152 | void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) | |
2153 | { | |
2154 | u32 mem_size; | |
2155 | ||
2156 | if (hba->stats_buffer) { | |
2157 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | |
2158 | hba->stats_buffer, hba->stats_buf_dma); | |
2159 | hba->stats_buffer = NULL; | |
2160 | } | |
2161 | ||
2162 | if (hba->dummy_buffer) { | |
2163 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | |
2164 | hba->dummy_buffer, hba->dummy_buf_dma); | |
2165 | hba->dummy_buffer = NULL; | |
2166 | } | |
2167 | ||
2168 | if (hba->t2_hash_tbl_ptr) { | |
2169 | mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); | |
2170 | dma_free_coherent(&hba->pcidev->dev, mem_size, | |
2171 | hba->t2_hash_tbl_ptr, | |
2172 | hba->t2_hash_tbl_ptr_dma); | |
2173 | hba->t2_hash_tbl_ptr = NULL; | |
2174 | } | |
2175 | ||
2176 | if (hba->t2_hash_tbl) { | |
2177 | mem_size = BNX2FC_NUM_MAX_SESS * | |
2178 | sizeof(struct fcoe_t2_hash_table_entry); | |
2179 | dma_free_coherent(&hba->pcidev->dev, mem_size, | |
2180 | hba->t2_hash_tbl, hba->t2_hash_tbl_dma); | |
2181 | hba->t2_hash_tbl = NULL; | |
2182 | } | |
2183 | bnx2fc_free_hash_table(hba); | |
2184 | } |