/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

#include "fjes_hw.h"
#include "fjes.h"

/* supported MTU list */
const u32 fjes_support_mtu[] = {
        FJES_MTU_DEFINE(8 * 1024),
        FJES_MTU_DEFINE(16 * 1024),
        FJES_MTU_DEFINE(32 * 1024),
        FJES_MTU_DEFINE(64 * 1024),
        0
};

u32 fjes_hw_rd32(struct fjes_hw *hw, u32 reg)
{
        u8 *base = hw->base;
        u32 value = 0;

        value = readl(&base[reg]);

        return value;
}

static u8 *fjes_hw_iomap(struct fjes_hw *hw)
{
        u8 *base;

        if (!request_mem_region(hw->hw_res.start, hw->hw_res.size,
                                fjes_driver_name)) {
                pr_err("request_mem_region failed\n");
                return NULL;
        }

        base = (u8 *)ioremap_nocache(hw->hw_res.start, hw->hw_res.size);

        return base;
}

static void fjes_hw_iounmap(struct fjes_hw *hw)
{
        iounmap(hw->base);
        release_mem_region(hw->hw_res.start, hw->hw_res.size);
}

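/*
 * Request a device reset by setting the reset bit in the DCTL register,
 * then poll DCTL until the hardware clears the bit or
 * FJES_DEVICE_RESET_TIMEOUT expires.  Returns 0 on success, -EIO on timeout.
 */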
int fjes_hw_reset(struct fjes_hw *hw)
{
        union REG_DCTL dctl;
        int timeout;

        dctl.reg = 0;
        dctl.bits.reset = 1;
        wr32(XSCT_DCTL, dctl.reg);

        timeout = FJES_DEVICE_RESET_TIMEOUT * 1000;
        dctl.reg = rd32(XSCT_DCTL);
        while ((dctl.bits.reset == 1) && (timeout > 0)) {
                msleep(1000);
                dctl.reg = rd32(XSCT_DCTL);
                timeout -= 1000;
        }

        return timeout > 0 ? 0 : -EIO;
}

static int fjes_hw_get_max_epid(struct fjes_hw *hw)
{
        union REG_MAX_EP info;

        info.reg = rd32(XSCT_MAX_EP);

        return info.bits.maxep;
}

static int fjes_hw_get_my_epid(struct fjes_hw *hw)
{
        union REG_OWNER_EPID info;

        info.reg = rd32(XSCT_OWNER_EPID);

        return info.bits.epid;
}

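/*
 * Allocate the shared status region: a fjes_device_shared_info header
 * followed by one status byte per endpoint (hw->max_epid entries).
 */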
static int fjes_hw_alloc_shared_status_region(struct fjes_hw *hw)
{
        size_t size;

        size = sizeof(struct fjes_device_shared_info) +
               (sizeof(u8) * hw->max_epid);
        hw->hw_info.share = kzalloc(size, GFP_KERNEL);
        if (!hw->hw_info.share)
                return -ENOMEM;

        hw->hw_info.share->epnum = hw->max_epid;

        return 0;
}

static void fjes_hw_free_shared_status_region(struct fjes_hw *hw)
{
        kfree(hw->hw_info.share);
        hw->hw_info.share = NULL;
}

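/*
 * Allocate one endpoint buffer of EP_BUFFER_SIZE bytes.  The buffer is
 * laid out as a "union ep_buffer_info" header followed by the frame ring.
 */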
static int fjes_hw_alloc_epbuf(struct epbuf_handler *epbh)
{
        void *mem;

        mem = vzalloc(EP_BUFFER_SIZE);
        if (!mem)
                return -ENOMEM;

        epbh->buffer = mem;
        epbh->size = EP_BUFFER_SIZE;

        epbh->info = (union ep_buffer_info *)mem;
        epbh->ring = (u8 *)(mem + sizeof(union ep_buffer_info));

        return 0;
}

static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
{
        if (epbh->buffer)
                vfree(epbh->buffer);

        epbh->buffer = NULL;
        epbh->size = 0;

        epbh->info = NULL;
        epbh->ring = NULL;
}

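/*
 * (Re)initialize an endpoint buffer header: preserve the configured VLAN
 * IDs across the memset, store the MAC address, reset head/tail, and
 * derive frame_max and count_max from the requested MTU and buffer size.
 */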
void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
{
        union ep_buffer_info *info = epbh->info;
        u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
        int i;

        for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
                vlan_id[i] = info->v1i.vlan_id[i];

        memset(info, 0, sizeof(union ep_buffer_info));

        info->v1i.version = 0;  /* version 0 */

        for (i = 0; i < ETH_ALEN; i++)
                info->v1i.mac_addr[i] = mac_addr[i];

        info->v1i.head = 0;
        info->v1i.tail = 1;

        info->v1i.info_size = sizeof(union ep_buffer_info);
        info->v1i.buffer_size = epbh->size - info->v1i.info_size;

        info->v1i.frame_max = FJES_MTU_TO_FRAME_SIZE(mtu);
        info->v1i.count_max =
                EP_RING_NUM(info->v1i.buffer_size, info->v1i.frame_max);

        for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
                info->v1i.vlan_id[i] = vlan_id[i];
}

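/*
 * Program the request/response buffer lengths and the physical addresses
 * of the request, response and share-status buffers into the device's
 * command registers (low/high 32-bit halves written separately).
 */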
void
fjes_hw_init_command_registers(struct fjes_hw *hw,
                               struct fjes_device_command_param *param)
{
        /* Request Buffer length */
        wr32(XSCT_REQBL, (__le32)(param->req_len));
        /* Response Buffer Length */
        wr32(XSCT_RESPBL, (__le32)(param->res_len));

        /* Request Buffer Address */
        wr32(XSCT_REQBAL,
             (__le32)(param->req_start & GENMASK_ULL(31, 0)));
        wr32(XSCT_REQBAH,
             (__le32)((param->req_start & GENMASK_ULL(63, 32)) >> 32));

        /* Response Buffer Address */
        wr32(XSCT_RESPBAL,
             (__le32)(param->res_start & GENMASK_ULL(31, 0)));
        wr32(XSCT_RESPBAH,
             (__le32)((param->res_start & GENMASK_ULL(63, 32)) >> 32));

        /* Share status address */
        wr32(XSCT_SHSTSAL,
             (__le32)(param->share_start & GENMASK_ULL(31, 0)));
        wr32(XSCT_SHSTSAH,
             (__le32)((param->share_start & GENMASK_ULL(63, 32)) >> 32));
}

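/*
 * Allocate all software resources for the hardware: the per-endpoint
 * shared-memory info array, the command request/response buffers, the
 * shared status region, and a TX/RX buffer pair for every endpoint other
 * than our own.  Finally, hand the buffer addresses to the device via
 * fjes_hw_init_command_registers().
 */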
static int fjes_hw_setup(struct fjes_hw *hw)
{
        u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        struct fjes_device_command_param param;
        struct ep_share_mem_info *buf_pair;
        size_t mem_size;
        int result;
        int epidx;
        void *buf;

        hw->hw_info.max_epid = &hw->max_epid;
        hw->hw_info.my_epid = &hw->my_epid;

        buf = kcalloc(hw->max_epid, sizeof(struct ep_share_mem_info),
                      GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        hw->ep_shm_info = (struct ep_share_mem_info *)buf;

        mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
        hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
        if (!(hw->hw_info.req_buf))
                return -ENOMEM;

        hw->hw_info.req_buf_size = mem_size;

        mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
        hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
        if (!(hw->hw_info.res_buf))
                return -ENOMEM;

        hw->hw_info.res_buf_size = mem_size;

        result = fjes_hw_alloc_shared_status_region(hw);
        if (result)
                return result;

        hw->hw_info.buffer_share_bit = 0;
        hw->hw_info.buffer_unshare_reserve_bit = 0;

        for (epidx = 0; epidx < hw->max_epid; epidx++) {
                if (epidx != hw->my_epid) {
                        buf_pair = &hw->ep_shm_info[epidx];

                        result = fjes_hw_alloc_epbuf(&buf_pair->tx);
                        if (result)
                                return result;

                        result = fjes_hw_alloc_epbuf(&buf_pair->rx);
                        if (result)
                                return result;

                        fjes_hw_setup_epbuf(&buf_pair->tx, mac,
                                            fjes_support_mtu[0]);
                        fjes_hw_setup_epbuf(&buf_pair->rx, mac,
                                            fjes_support_mtu[0]);
                }
        }

        memset(&param, 0, sizeof(param));

        param.req_len = hw->hw_info.req_buf_size;
        param.req_start = __pa(hw->hw_info.req_buf);
        param.res_len = hw->hw_info.res_buf_size;
        param.res_start = __pa(hw->hw_info.res_buf);

        param.share_start = __pa(hw->hw_info.share->ep_status);

        fjes_hw_init_command_registers(hw, &param);

        return 0;
}

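/* Free everything allocated by fjes_hw_setup(). */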
static void fjes_hw_cleanup(struct fjes_hw *hw)
{
        int epidx;

        if (!hw->ep_shm_info)
                return;

        fjes_hw_free_shared_status_region(hw);

        kfree(hw->hw_info.req_buf);
        hw->hw_info.req_buf = NULL;

        kfree(hw->hw_info.res_buf);
        hw->hw_info.res_buf = NULL;

        for (epidx = 0; epidx < hw->max_epid; epidx++) {
                if (epidx == hw->my_epid)
                        continue;
                fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
                fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
        }

        kfree(hw->ep_shm_info);
        hw->ep_shm_info = NULL;
}

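/*
 * Bring up the hardware: map the register region, reset the device,
 * mask all interrupts, read the maximum and own endpoint IDs, and
 * allocate the shared buffers via fjes_hw_setup().
 */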
int fjes_hw_init(struct fjes_hw *hw)
{
        int ret;

        hw->base = fjes_hw_iomap(hw);
        if (!hw->base)
                return -EIO;

        ret = fjes_hw_reset(hw);
        if (ret)
                return ret;

        fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);

        mutex_init(&hw->hw_info.lock);

        hw->max_epid = fjes_hw_get_max_epid(hw);
        hw->my_epid = fjes_hw_get_my_epid(hw);

        if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
                return -ENXIO;

        ret = fjes_hw_setup(hw);

        return ret;
}

void fjes_hw_exit(struct fjes_hw *hw)
{
        int ret;

        if (hw->base) {
                ret = fjes_hw_reset(hw);
                if (ret)
                        pr_err("%s: reset error\n", __func__);

                fjes_hw_iounmap(hw);
                hw->base = NULL;
        }

        fjes_hw_cleanup(hw);
}

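/*
 * Issue a command to the device: write the request code to the CR
 * register and, if the device did not flag an error, poll the CS
 * register until the command completes or FJES_COMMAND_REQ_TIMEOUT
 * expires.  Hardware error info is translated into a
 * fjes_dev_command_response_e value.
 */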
static enum fjes_dev_command_response_e
fjes_hw_issue_request_command(struct fjes_hw *hw,
                              enum fjes_dev_command_request_type type)
{
        enum fjes_dev_command_response_e ret = FJES_CMD_STATUS_UNKNOWN;
        union REG_CR cr;
        union REG_CS cs;
        int timeout;

        cr.reg = 0;
        cr.bits.req_start = 1;
        cr.bits.req_code = type;
        wr32(XSCT_CR, cr.reg);
        cr.reg = rd32(XSCT_CR);

        if (cr.bits.error == 0) {
                timeout = FJES_COMMAND_REQ_TIMEOUT * 1000;
                cs.reg = rd32(XSCT_CS);

                while ((cs.bits.complete != 1) && timeout > 0) {
                        msleep(1000);
                        cs.reg = rd32(XSCT_CS);
                        timeout -= 1000;
                }

                if (cs.bits.complete == 1)
                        ret = FJES_CMD_STATUS_NORMAL;
                else if (timeout <= 0)
                        ret = FJES_CMD_STATUS_TIMEOUT;

        } else {
                switch (cr.bits.err_info) {
                case FJES_CMD_REQ_ERR_INFO_PARAM:
                        ret = FJES_CMD_STATUS_ERROR_PARAM;
                        break;
                case FJES_CMD_REQ_ERR_INFO_STATUS:
                        ret = FJES_CMD_STATUS_ERROR_STATUS;
                        break;
                default:
                        ret = FJES_CMD_STATUS_UNKNOWN;
                        break;
                }
        }

        return ret;
}

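/*
 * Issue the INFO command and validate the response length and result
 * code, mapping command failures onto standard error numbers
 * (-ENOMSG/-EBUSY/-EPERM).
 */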
int fjes_hw_request_info(struct fjes_hw *hw)
{
        union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
        union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
        enum fjes_dev_command_response_e ret;
        int result;

        memset(req_buf, 0, hw->hw_info.req_buf_size);
        memset(res_buf, 0, hw->hw_info.res_buf_size);

        req_buf->info.length = FJES_DEV_COMMAND_INFO_REQ_LEN;

        res_buf->info.length = 0;
        res_buf->info.code = 0;

        ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_INFO);

        result = 0;

        if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw->hw_info.max_epid)) !=
            res_buf->info.length) {
                result = -ENOMSG;
        } else if (ret == FJES_CMD_STATUS_NORMAL) {
                switch (res_buf->info.code) {
                case FJES_CMD_REQ_RES_CODE_NORMAL:
                        result = 0;
                        break;
                default:
                        result = -EPERM;
                        break;
                }
        } else {
                switch (ret) {
                case FJES_CMD_STATUS_UNKNOWN:
                        result = -EPERM;
                        break;
                case FJES_CMD_STATUS_TIMEOUT:
                        result = -EBUSY;
                        break;
                case FJES_CMD_STATUS_ERROR_PARAM:
                        result = -EPERM;
                        break;
                case FJES_CMD_STATUS_ERROR_STATUS:
                        result = -EPERM;
                        break;
                default:
                        result = -EPERM;
                        break;
                }
        }

        return result;
}

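/*
 * Share this endpoint's TX/RX buffer pair with the partner identified by
 * dest_epid.  The request carries the physical address of every
 * EP_BUFFER_INFO_SIZE chunk of both vmalloc'ed buffers.  While the device
 * reports BUSY the command is retried, with a per-EPID delay, until
 * FJES_COMMAND_REQ_BUFF_TIMEOUT expires.  On success the corresponding
 * bit in buffer_share_bit is set.
 */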
int fjes_hw_register_buff_addr(struct fjes_hw *hw, int dest_epid,
                               struct ep_share_mem_info *buf_pair)
{
        union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
        union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
        enum fjes_dev_command_response_e ret;
        int page_count;
        int timeout;
        int i, idx;
        void *addr;
        int result;

        if (test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
                return 0;

        memset(req_buf, 0, hw->hw_info.req_buf_size);
        memset(res_buf, 0, hw->hw_info.res_buf_size);

        req_buf->share_buffer.length = FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
                                                buf_pair->tx.size,
                                                buf_pair->rx.size);
        req_buf->share_buffer.epid = dest_epid;

        idx = 0;
        req_buf->share_buffer.buffer[idx++] = buf_pair->tx.size;
        page_count = buf_pair->tx.size / EP_BUFFER_INFO_SIZE;
        for (i = 0; i < page_count; i++) {
                addr = ((u8 *)(buf_pair->tx.buffer)) +
                                (i * EP_BUFFER_INFO_SIZE);
                req_buf->share_buffer.buffer[idx++] =
                        (__le64)(page_to_phys(vmalloc_to_page(addr)) +
                                        offset_in_page(addr));
        }

        req_buf->share_buffer.buffer[idx++] = buf_pair->rx.size;
        page_count = buf_pair->rx.size / EP_BUFFER_INFO_SIZE;
        for (i = 0; i < page_count; i++) {
                addr = ((u8 *)(buf_pair->rx.buffer)) +
                                (i * EP_BUFFER_INFO_SIZE);
                req_buf->share_buffer.buffer[idx++] =
                        (__le64)(page_to_phys(vmalloc_to_page(addr)) +
                                        offset_in_page(addr));
        }

        res_buf->share_buffer.length = 0;
        res_buf->share_buffer.code = 0;

        ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_SHARE_BUFFER);

        timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
        while ((ret == FJES_CMD_STATUS_NORMAL) &&
               (res_buf->share_buffer.length ==
                FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN) &&
               (res_buf->share_buffer.code == FJES_CMD_REQ_RES_CODE_BUSY) &&
               (timeout > 0)) {
                msleep(200 + hw->my_epid * 20);
                timeout -= (200 + hw->my_epid * 20);

                res_buf->share_buffer.length = 0;
                res_buf->share_buffer.code = 0;

                ret = fjes_hw_issue_request_command(
                                hw, FJES_CMD_REQ_SHARE_BUFFER);
        }

        result = 0;

        if (res_buf->share_buffer.length !=
            FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN)
                result = -ENOMSG;
        else if (ret == FJES_CMD_STATUS_NORMAL) {
                switch (res_buf->share_buffer.code) {
                case FJES_CMD_REQ_RES_CODE_NORMAL:
                        result = 0;
                        set_bit(dest_epid, &hw->hw_info.buffer_share_bit);
                        break;
                case FJES_CMD_REQ_RES_CODE_BUSY:
                        result = -EBUSY;
                        break;
                default:
                        result = -EPERM;
                        break;
                }
        } else {
                switch (ret) {
                case FJES_CMD_STATUS_UNKNOWN:
                        result = -EPERM;
                        break;
                case FJES_CMD_STATUS_TIMEOUT:
                        result = -EBUSY;
                        break;
                case FJES_CMD_STATUS_ERROR_PARAM:
                case FJES_CMD_STATUS_ERROR_STATUS:
                default:
                        result = -EPERM;
                        break;
                }
        }

        return result;
}

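/*
 * Counterpart of fjes_hw_register_buff_addr(): ask the device to unshare
 * the buffers for dest_epid, retrying while the device reports BUSY, and
 * clear the buffer_share_bit on success.
 */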
int fjes_hw_unregister_buff_addr(struct fjes_hw *hw, int dest_epid)
{
        union fjes_device_command_req *req_buf = hw->hw_info.req_buf;
        union fjes_device_command_res *res_buf = hw->hw_info.res_buf;
        struct fjes_device_shared_info *share = hw->hw_info.share;
        enum fjes_dev_command_response_e ret;
        int timeout;
        int result;

        if (!hw->base)
                return -EPERM;

        if (!req_buf || !res_buf || !share)
                return -EPERM;

        if (!test_bit(dest_epid, &hw->hw_info.buffer_share_bit))
                return 0;

        memset(req_buf, 0, hw->hw_info.req_buf_size);
        memset(res_buf, 0, hw->hw_info.res_buf_size);

        req_buf->unshare_buffer.length =
                        FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN;
        req_buf->unshare_buffer.epid = dest_epid;

        res_buf->unshare_buffer.length = 0;
        res_buf->unshare_buffer.code = 0;

        ret = fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);

        timeout = FJES_COMMAND_REQ_BUFF_TIMEOUT * 1000;
        while ((ret == FJES_CMD_STATUS_NORMAL) &&
               (res_buf->unshare_buffer.length ==
                FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) &&
               (res_buf->unshare_buffer.code ==
                FJES_CMD_REQ_RES_CODE_BUSY) &&
               (timeout > 0)) {
                msleep(200 + hw->my_epid * 20);
                timeout -= (200 + hw->my_epid * 20);

                res_buf->unshare_buffer.length = 0;
                res_buf->unshare_buffer.code = 0;

                ret =
                fjes_hw_issue_request_command(hw, FJES_CMD_REQ_UNSHARE_BUFFER);
        }

        result = 0;

        if (res_buf->unshare_buffer.length !=
            FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN) {
                result = -ENOMSG;
        } else if (ret == FJES_CMD_STATUS_NORMAL) {
                switch (res_buf->unshare_buffer.code) {
                case FJES_CMD_REQ_RES_CODE_NORMAL:
                        result = 0;
                        clear_bit(dest_epid, &hw->hw_info.buffer_share_bit);
                        break;
                case FJES_CMD_REQ_RES_CODE_BUSY:
                        result = -EBUSY;
                        break;
                default:
                        result = -EPERM;
                        break;
                }
        } else {
                switch (ret) {
                case FJES_CMD_STATUS_UNKNOWN:
                        result = -EPERM;
                        break;
                case FJES_CMD_STATUS_TIMEOUT:
                        result = -EBUSY;
                        break;
                case FJES_CMD_STATUS_ERROR_PARAM:
                case FJES_CMD_STATUS_ERROR_STATUS:
                default:
                        result = -EPERM;
                        break;
                }
        }

        return result;
}

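/*
 * Raise an interrupt toward another endpoint by writing the interrupt
 * mask bits combined with the destination EPID into the IG register.
 */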
int fjes_hw_raise_interrupt(struct fjes_hw *hw, int dest_epid,
                            enum REG_ICTL_MASK mask)
{
        u32 ig = mask | dest_epid;

        wr32(XSCT_IG, cpu_to_le32(ig));

        return 0;
}

u32 fjes_hw_capture_interrupt_status(struct fjes_hw *hw)
{
        u32 cur_is;

        cur_is = rd32(XSCT_IS);

        return cur_is;
}

void fjes_hw_set_irqmask(struct fjes_hw *hw,
                         enum REG_ICTL_MASK intr_mask, bool mask)
{
        if (mask)
                wr32(XSCT_IMS, intr_mask);
        else
                wr32(XSCT_IMC, intr_mask);
}

bool fjes_hw_epid_is_same_zone(struct fjes_hw *hw, int epid)
{
        if (epid >= hw->max_epid)
                return false;

        if ((hw->ep_shm_info[epid].es_status !=
                        FJES_ZONING_STATUS_ENABLE) ||
            (hw->ep_shm_info[hw->my_epid].zone ==
                        FJES_ZONING_ZONE_TYPE_NONE))
                return false;
        else
                return (hw->ep_shm_info[epid].zone ==
                        hw->ep_shm_info[hw->my_epid].zone);
}

int fjes_hw_epid_is_shared(struct fjes_device_shared_info *share,
                           int dest_epid)
{
        int value = false;

        if (dest_epid < share->epnum)
                value = share->ep_status[dest_epid];

        return value;
}

static bool fjes_hw_epid_is_stop_requested(struct fjes_hw *hw, int src_epid)
{
        return test_bit(src_epid, &hw->txrx_stop_req_bit);
}

static bool fjes_hw_epid_is_stop_process_done(struct fjes_hw *hw, int src_epid)
{
        return (hw->ep_shm_info[src_epid].tx.info->v1i.rx_status &
                        FJES_RX_STOP_REQ_DONE);
}

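/*
 * Derive the partner state for an endpoint from the shared status region
 * and the local TX/RX stop bookkeeping: UNSHARE when the buffers are not
 * shared, WAITING while a stop request is pending, COMPLETE once the
 * partner has acknowledged the stop, otherwise SHARED.
 */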
enum ep_partner_status
fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
{
        enum ep_partner_status status;

        if (fjes_hw_epid_is_shared(hw->hw_info.share, epid)) {
                if (fjes_hw_epid_is_stop_requested(hw, epid)) {
                        status = EP_PARTNER_WAITING;
                } else {
                        if (fjes_hw_epid_is_stop_process_done(hw, epid))
                                status = EP_PARTNER_COMPLETE;
                        else
                                status = EP_PARTNER_SHARED;
                }
        } else {
                status = EP_PARTNER_UNSHARE;
        }

        return status;
}

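/*
 * Request a TX/RX stop from every other endpoint: interrupt partners that
 * are currently in the SHARED state, mark each endpoint in
 * buffer_unshare_reserve_bit and txrx_stop_req_bit, and set the
 * FJES_RX_STOP_REQ_REQUEST flag in the local TX buffer status.
 */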
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
        enum ep_partner_status status;
        int epidx;

        for (epidx = 0; epidx < hw->max_epid; epidx++) {
                if (epidx == hw->my_epid)
                        continue;

                status = fjes_hw_get_partner_ep_status(hw, epidx);
                switch (status) {
                case EP_PARTNER_SHARED:
                        fjes_hw_raise_interrupt(hw, epidx,
                                                REG_ICTL_MASK_TXRX_STOP_REQ);
                        break;
                default:
                        break;
                }

                set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
                set_bit(epidx, &hw->txrx_stop_req_bit);

                hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
                        FJES_RX_STOP_REQ_REQUEST;
        }
}

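/*
 * Wait, up to FJES_COMMAND_EPSTOP_WAIT_TIMEOUT, for every endpoint marked
 * in buffer_unshare_reserve_bit to either become unshared or acknowledge
 * the stop request (FJES_RX_STOP_REQ_DONE), polling every 100 ms.
 * Returns 0 when all endpoints responded in time, -EBUSY otherwise.
 */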
int fjes_hw_wait_epstop(struct fjes_hw *hw)
{
        enum ep_partner_status status;
        union ep_buffer_info *info;
        int wait_time = 0;
        int epidx;

        while (hw->hw_info.buffer_unshare_reserve_bit &&
               (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)) {
                for (epidx = 0; epidx < hw->max_epid; epidx++) {
                        if (epidx == hw->my_epid)
                                continue;
                        status = fjes_hw_epid_is_shared(hw->hw_info.share,
                                                        epidx);
                        info = hw->ep_shm_info[epidx].rx.info;
                        if ((!status ||
                             (info->v1i.rx_status &
                              FJES_RX_STOP_REQ_DONE)) &&
                            test_bit(epidx,
                                     &hw->hw_info.buffer_unshare_reserve_bit)) {
                                clear_bit(epidx,
                                          &hw->hw_info.buffer_unshare_reserve_bit);
                        }
                }

                msleep(100);
                wait_time += 100;
        }

        for (epidx = 0; epidx < hw->max_epid; epidx++) {
                if (epidx == hw->my_epid)
                        continue;
                if (test_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit))
                        clear_bit(epidx,
                                  &hw->hw_info.buffer_unshare_reserve_bit);
        }

        return (wait_time < FJES_COMMAND_EPSTOP_WAIT_TIMEOUT * 1000)
                        ? 0 : -EBUSY;
}