NTB: Split ntb_hw_intel and ntb_transport drivers
drivers/ntb/hw/intel/ntb_hw_intel.c

/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"

#define NTB_NAME	"ntb_hw_intel"
#define NTB_DESC	"Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER		"2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

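/*
 * Illustrative example (added commentary, not from the original source):
 * the per-bar register arrays use a 4-byte stride keyed by bar number,
 * and the data bars are even-numbered, so consecutive entries land 8
 * bytes apart.  E.g. with a register block at 0x10, bar2_off(0x10, 2)
 * == 0x10 and bar2_off(0x10, 4) == 0x18, matching the 64-bit xlat and
 * limit registers read below with ioread64.
 */
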
static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
		 "value of zero or positive starts from first mw idx, and a "
		 "negative value starts from last mw idx.  Both sides MUST "
		 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
		 "ntb so that the peer ntb only occupies the first half of "
		 "the mw, so the second half can still be used as a mw.  Both "
		 "sides MUST set the same value here!");

static const struct intel_ntb_reg bwd_reg;
static const struct intel_ntb_alt_reg bwd_pri_reg;
static const struct intel_ntb_alt_reg bwd_sec_reg;
static const struct intel_ntb_alt_reg bwd_b2b_reg;
static const struct intel_ntb_xlat_reg bwd_pri_xlat;
static const struct intel_ntb_xlat_reg bwd_sec_xlat;
static const struct intel_ntb_reg snb_reg;
static const struct intel_ntb_alt_reg snb_pri_reg;
static const struct intel_ntb_alt_reg snb_sec_reg;
static const struct intel_ntb_alt_reg snb_b2b_reg;
static const struct intel_ntb_xlat_reg snb_pri_xlat;
static const struct intel_ntb_xlat_reg snb_sec_xlat;
static const struct intel_b2b_addr snb_b2b_usd_addr;
static const struct intel_b2b_addr snb_b2b_dsd_addr;

static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

#ifndef ioread64
#ifdef readq
#define ioread64 readq
#else
#define ioread64 _ioread64
static inline u64 _ioread64(void __iomem *mmio)
{
	u64 low, high;

	low = ioread32(mmio);
	high = ioread32(mmio + sizeof(u32));
	return low | (high << 32);
}
#endif
#endif

#ifndef iowrite64
#ifdef writeq
#define iowrite64 writeq
#else
#define iowrite64 _iowrite64
static inline void _iowrite64(u64 val, void __iomem *mmio)
{
	iowrite32(val, mmio);
	iowrite32(val >> 32, mmio + sizeof(u32));
}
#endif
#endif

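/*
 * Note (added commentary): on platforms without readq/writeq the
 * fallbacks above issue two 32-bit MMIO accesses, low dword first, so a
 * 64-bit register access is not atomic there.
 */
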
static inline int pdev_is_bwd(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
		return 1;
	}
	return 0;
}

static inline int pdev_is_snb(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
	case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
		return 1;
	}
	return 0;
}

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
	ndev->unsafe_flags = 0;
	ndev->unsafe_flags_ignore = 0;

	/* Only B2B has a workaround to avoid SDOORBELL */
	if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
		if (!ntb_topo_is_b2b(ndev->ntb.topo))
			ndev->unsafe_flags |= NTB_UNSAFE_DB;

	/* No low level workaround to avoid SB01BASE */
	if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
		ndev->unsafe_flags |= NTB_UNSAFE_DB;
		ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
	}
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
				 unsigned long flag)
{
	return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
				     unsigned long flag)
{
	flag &= ndev->unsafe_flags;
	ndev->unsafe_flags_ignore |= flag;

	return !!flag;
}

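/*
 * Added commentary: resources flagged "unsafe" are those the errata
 * workarounds cannot protect.  Accessing them trips a WARN in
 * ndev_is_unsafe() unless a client has explicitly opted in through the
 * *_is_unsafe ops, which record the override in unsafe_flags_ignore.
 */
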
static int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
	if (idx < 0 || idx > ndev->mw_count)
		return -EINVAL;
	return ndev->reg->mw_bar[idx];
}

static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
			       phys_addr_t *db_addr, resource_size_t *db_size,
			       phys_addr_t reg_addr, unsigned long reg)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_addr) {
		*db_addr = reg_addr + reg;
		dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr);
	}

	if (db_size) {
		*db_size = ndev->reg->db_size;
		dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size);
	}

	return 0;
}

static inline u64 ndev_db_read(struct intel_ntb_dev *ndev,
			       void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	return ndev->reg->db_ioread(mmio);
}

static inline int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
				void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	ndev->reg->db_iowrite(db_bits, mmio);

	return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				   void __iomem *mmio)
{
	unsigned long irqflags;

	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask |= db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
				     void __iomem *mmio)
{
	unsigned long irqflags;

	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_DB));

	if (db_bits & ~ndev->db_valid_mask)
		return -EINVAL;

	spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
	{
		ndev->db_mask &= ~db_bits;
		ndev->reg->db_iowrite(ndev->db_mask, mmio);
	}
	spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

	return 0;
}

/* Return u64 rather than int so high doorbell bits are not truncated. */
static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
	u64 shift, mask;

	shift = ndev->db_vec_shift;
	mask = BIT_ULL(shift) - 1;

	return mask << (shift * db_vector);
}

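/*
 * Illustrative example (added commentary): each interrupt vector owns
 * db_vec_shift consecutive doorbell bits.  With db_vec_shift == 5,
 * ndev_vec_mask(ndev, 2) == 0x1f << 10, i.e. doorbell bits 10..14.
 */
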
static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
				 phys_addr_t *spad_addr, phys_addr_t reg_addr,
				 unsigned long reg)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	if (spad_addr) {
		*spad_addr = reg_addr + reg + (idx << 2);
		dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr);
	}

	return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
				 void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return 0;

	return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
				  void __iomem *mmio)
{
	WARN_ON_ONCE(ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD));

	if (idx < 0 || idx >= ndev->spad_count)
		return -EINVAL;

	iowrite32(val, mmio + (idx << 2));

	return 0;
}

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
	u64 vec_mask;

	vec_mask = ndev_vec_mask(ndev, vec);

	dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask);

	ndev->last_ts = jiffies;

	if (vec_mask & ndev->db_link_mask) {
		if (ndev->reg->poll_link(ndev))
			ntb_link_event(&ndev->ntb);
	}

	if (vec_mask & ndev->db_valid_mask)
		ntb_db_event(&ndev->ntb, vec);

	return IRQ_HANDLED;
}

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
	struct intel_ntb_vec *nvec = dev;

	return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
	struct intel_ntb_dev *ndev = dev;

	return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq);
}

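/*
 * Added commentary: interrupt setup below tries progressively simpler
 * mechanisms: MSI-X with one vector per doorbell group, then a single
 * MSI vector, then legacy shared INTx.  db_vec_count and db_vec_shift
 * record how the doorbell bits map onto whichever scheme succeeded.
 */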
static int ndev_init_isr(struct intel_ntb_dev *ndev,
			 int msix_min, int msix_max,
			 int msix_shift, int total_shift)
{
	struct pci_dev *pdev;
	int rc, i, msix_count;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	/* Try to set up msix irq */

	ndev->vec = kcalloc(msix_max, sizeof(*ndev->vec), GFP_KERNEL);
	if (!ndev->vec)
		goto err_msix_vec_alloc;

	ndev->msix = kcalloc(msix_max, sizeof(*ndev->msix), GFP_KERNEL);
	if (!ndev->msix)
		goto err_msix_alloc;

	for (i = 0; i < msix_max; ++i)
		ndev->msix[i].entry = i;

	msix_count = pci_enable_msix_range(pdev, ndev->msix,
					   msix_min, msix_max);
	if (msix_count < 0)
		goto err_msix_enable;

	for (i = 0; i < msix_count; ++i) {
		ndev->vec[i].ndev = ndev;
		ndev->vec[i].num = i;
		rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
				 "ndev_vec_isr", &ndev->vec[i]);
		if (rc)
			goto err_msix_request;
	}

	dev_dbg(ndev_dev(ndev), "Using msix interrupts\n");
	ndev->db_vec_count = msix_count;
	ndev->db_vec_shift = msix_shift;
	return 0;

err_msix_request:
	while (i-- > 0)
		free_irq(ndev->msix[i].vector, &ndev->vec[i]);
	pci_disable_msix(pdev);
err_msix_enable:
	kfree(ndev->msix);
err_msix_alloc:
	kfree(ndev->vec);
err_msix_vec_alloc:
	ndev->msix = NULL;
	ndev->vec = NULL;

	/* Try to set up msi irq */

	rc = pci_enable_msi(pdev);
	if (rc)
		goto err_msi_enable;

	rc = request_irq(pdev->irq, ndev_irq_isr, 0,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_msi_request;

	dev_dbg(ndev_dev(ndev), "Using msi interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_msi_request:
	pci_disable_msi(pdev);
err_msi_enable:

	/* Try to set up intx irq */

	pci_intx(pdev, 1);

	rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
			 "ndev_irq_isr", ndev);
	if (rc)
		goto err_intx_request;

	dev_dbg(ndev_dev(ndev), "Using intx interrupts\n");
	ndev->db_vec_count = 1;
	ndev->db_vec_shift = total_shift;
	return 0;

err_intx_request:
	return rc;
}

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev;
	int i;

	pdev = ndev_pdev(ndev);

	/* Mask all doorbell interrupts */
	ndev->db_mask = ndev->db_valid_mask;
	ndev->reg->db_iowrite(ndev->db_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	if (ndev->msix) {
		i = ndev->db_vec_count;
		while (i--)
			free_irq(ndev->msix[i].vector, &ndev->vec[i]);
		pci_disable_msix(pdev);
		kfree(ndev->msix);
		kfree(ndev->vec);
	} else {
		free_irq(pdev->irq, ndev);
		if (pci_dev_msi_enabled(pdev))
			pci_disable_msi(pdev);
	}
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct intel_ntb_dev *ndev;
	void __iomem *mmio;
	char *buf;
	size_t buf_size;
	ssize_t ret, off;
	union { u64 v64; u32 v32; u16 v16; } u;

	ndev = filp->private_data;
	mmio = ndev->self_mmio;

	buf_size = min(count, 0x800ul);

	buf = kmalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	off = 0;

	off += scnprintf(buf + off, buf_size - off,
			 "NTB Device Information:\n");

	off += scnprintf(buf + off, buf_size - off,
			 "Connection Topology -\t%s\n",
			 ntb_topo_string(ndev->ntb.topo));

	off += scnprintf(buf + off, buf_size - off,
			 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
	off += scnprintf(buf + off, buf_size - off,
			 "B2B MW Idx -\t\t%d\n", ndev->b2b_idx);
	off += scnprintf(buf + off, buf_size - off,
			 "BAR4 Split -\t\t%s\n",
			 ndev->bar4_split ? "yes" : "no");

	off += scnprintf(buf + off, buf_size - off,
			 "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
	off += scnprintf(buf + off, buf_size - off,
			 "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

	if (!ndev->reg->link_is_up(ndev)) {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tDown\n");
	} else {
		off += scnprintf(buf + off, buf_size - off,
				 "Link Status -\t\tUp\n");
		off += scnprintf(buf + off, buf_size - off,
				 "Link Speed -\t\tPCI-E Gen %u\n",
				 NTB_LNK_STA_SPEED(ndev->lnk_sta));
		off += scnprintf(buf + off, buf_size - off,
				 "Link Width -\t\tx%u\n",
				 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
	}

	off += scnprintf(buf + off, buf_size - off,
			 "Memory Window Count -\t%u\n", ndev->mw_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Scratchpad Count -\t%u\n", ndev->spad_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Count -\t%u\n", ndev->db_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Mask -\t\t%#llx\n", u.v64);

	u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
	off += scnprintf(buf + off, buf_size - off,
			 "Doorbell Bell -\t\t%#llx\n", u.v64);

	off += scnprintf(buf + off, buf_size - off,
			 "\nNTB Incoming XLAT:\n");

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT23 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "XLAT45 -\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT23 -\t\t\t%#018llx\n", u.v64);

	u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
	off += scnprintf(buf + off, buf_size - off,
			 "LMT45 -\t\t\t%#018llx\n", u.v64);

	if (pdev_is_snb(ndev->ntb.pdev)) {
		if (ntb_topo_is_b2b(ndev->ntb.topo)) {
			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Outgoing B2B XLAT:\n");

			u.v64 = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B XLAT45 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_PBAR23LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_PBAR45LMT_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "B2B LMT45 -\t\t%#018llx\n", u.v64);

			off += scnprintf(buf + off, buf_size - off,
					 "\nNTB Secondary BAR:\n");

			u.v64 = ioread64(mmio + SNB_SBAR0BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR01 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR23 -\t\t%#018llx\n", u.v64);

			u.v64 = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
			off += scnprintf(buf + off, buf_size - off,
					 "SBAR45 -\t\t%#018llx\n", u.v64);
		}

		off += scnprintf(buf + off, buf_size - off,
				 "\nSNB NTB Statistics:\n");

		u.v16 = ioread16(mmio + SNB_USMEMMISS_OFFSET);
		off += scnprintf(buf + off, buf_size - off,
				 "Upstream Memory Miss -\t%u\n", u.v16);

		off += scnprintf(buf + off, buf_size - off,
				 "\nSNB NTB Hardware Errors:\n");

		if (!pci_read_config_word(ndev->ntb.pdev,
					  SNB_DEVSTS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "DEVSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_word(ndev->ntb.pdev,
					  SNB_LINK_STATUS_OFFSET, &u.v16))
			off += scnprintf(buf + off, buf_size - off,
					 "LNKSTS -\t\t%#06x\n", u.v16);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   SNB_UNCERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "UNCERRSTS -\t\t%#06x\n", u.v32);

		if (!pci_read_config_dword(ndev->ntb.pdev,
					   SNB_CORERRSTS_OFFSET, &u.v32))
			off += scnprintf(buf + off, buf_size - off,
					 "CORERRSTS -\t\t%#06x\n", u.v32);
	}

	ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
	kfree(buf);
	return ret;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
	if (!debugfs_dir) {
		ndev->debugfs_dir = NULL;
		ndev->debugfs_info = NULL;
	} else {
		ndev->debugfs_dir =
			debugfs_create_dir(ndev_name(ndev), debugfs_dir);
		if (!ndev->debugfs_dir)
			ndev->debugfs_info = NULL;
		else
			ndev->debugfs_info =
				debugfs_create_file("info", S_IRUSR,
						    ndev->debugfs_dir, ndev,
						    &intel_ntb_debugfs_info);
	}
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
	debugfs_remove_recursive(ndev->debugfs_dir);
}

static int intel_ntb_mw_count(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->mw_count;
}

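/*
 * Added commentary: when the b2b mw consumed a whole bar (b2b_off == 0),
 * that mw is hidden from clients, so indexes at or above b2b_idx are
 * shifted up by one to skip it.  With half-sharing (b2b_off != 0) the
 * same bar serves both purposes and no index is skipped.
 */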
static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx,
				  phys_addr_t *base,
				  resource_size_t *size,
				  resource_size_t *align,
				  resource_size_t *align_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	if (base)
		*base = pci_resource_start(ndev->ntb.pdev, bar) +
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (size)
		*size = pci_resource_len(ndev->ntb.pdev, bar) -
			(idx == ndev->b2b_idx ? ndev->b2b_off : 0);

	if (align)
		*align = pci_resource_len(ndev->ntb.pdev, bar);

	if (align_size)
		*align_size = 1;

	return 0;
}

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx,
				  dma_addr_t addr, resource_size_t size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);
	unsigned long base_reg, xlat_reg, limit_reg;
	resource_size_t bar_size, mw_size;
	void __iomem *mmio;
	u64 base, limit, reg_val;
	int bar;

	if (idx >= ndev->b2b_idx && !ndev->b2b_off)
		idx += 1;

	bar = ndev_mw_to_bar(ndev, idx);
	if (bar < 0)
		return bar;

	bar_size = pci_resource_len(ndev->ntb.pdev, bar);

	if (idx == ndev->b2b_idx)
		mw_size = bar_size - ndev->b2b_off;
	else
		mw_size = bar_size;

	/* hardware requires that addr is aligned to bar size */
	if (addr & (bar_size - 1))
		return -EINVAL;

	/* make sure the range fits in the usable mw size */
	if (size > mw_size)
		return -EINVAL;

	mmio = ndev->self_mmio;
	base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
	xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
	limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

	if (bar < 4 || !ndev->bar4_split) {
		base = ioread64(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite64(addr, mmio + xlat_reg);
		reg_val = ioread64(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite64(limit, mmio + limit_reg);
		reg_val = ioread64(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite64(base, mmio + limit_reg);
			iowrite64(0, mmio + xlat_reg);
			return -EIO;
		}
	} else {
		/* split bar addr range must all be 32 bit */
		if (addr & (~0ull << 32))
			return -EINVAL;
		if ((addr + size) & (~0ull << 32))
			return -EINVAL;

		base = ioread32(mmio + base_reg);

		/* Set the limit if supported, if size is not mw_size */
		if (limit_reg && size != mw_size)
			limit = base + size;
		else
			limit = 0;

		/* set and verify setting the translation address */
		iowrite32(addr, mmio + xlat_reg);
		reg_val = ioread32(mmio + xlat_reg);
		if (reg_val != addr) {
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}

		/* set and verify setting the limit */
		iowrite32(limit, mmio + limit_reg);
		reg_val = ioread32(mmio + limit_reg);
		if (reg_val != limit) {
			iowrite32(base, mmio + limit_reg);
			iowrite32(0, mmio + xlat_reg);
			return -EIO;
		}
	}

	return 0;
}
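
/*
 * Illustrative example (added commentary): for a 64 MiB bar, the dma
 * address passed in must itself be 64 MiB aligned.  Programming
 * addr = 0x4000000 with size = 32 MiB sets the xlat register to that
 * base and, where a limit register exists, the limit to base + 32 MiB,
 * so only the first half of the bar is translated.
 */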

static int intel_ntb_link_is_up(struct ntb_dev *ntb,
				enum ntb_speed *speed,
				enum ntb_width *width)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (ndev->reg->link_is_up(ndev)) {
		if (speed)
			*speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
		if (width)
			*width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
		return 1;
	} else {
		/* TODO MAYBE: is it possible to observe the link speed and
		 * width while link is training? */
		if (speed)
			*speed = NTB_SPEED_NONE;
		if (width)
			*width = NTB_WIDTH_NONE;
		return 0;
	}
}

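/*
 * Added commentary: link enable is only meaningful from the primary or
 * b2b side (it returns -EINVAL for NTB_TOPO_SEC).  It clears the
 * link-disable and config-lock bits and enables snooping on the data
 * bars so translated traffic stays cache-coherent.
 */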
static int intel_ntb_link_enable(struct ntb_dev *ntb,
				 enum ntb_speed max_speed,
				 enum ntb_width max_width)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_ctl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev),
		"Enabling link with max_speed %d max_width %d\n",
		max_speed, max_width);
	if (max_speed != NTB_SPEED_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed);
	if (max_width != NTB_WIDTH_AUTO)
		dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width);

	ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
	ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
	ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
	if (ndev->bar4_split)
		ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
	iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_link_disable(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;
	u32 ntb_cntl;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	if (ndev->ntb.topo == NTB_TOPO_SEC)
		return -EINVAL;

	dev_dbg(ndev_dev(ndev), "Disabling link\n");

	/* Bring NTB link down */
	ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
	ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
	if (ndev->bar4_split)
		ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
	ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
	iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

	return 0;
}

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

static u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
	return ntb_ndev(ntb)->db_valid_mask;
}

static int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->db_vec_count;
}

static u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	if (db_vector < 0 || db_vector > ndev->db_vec_count)
		return 0;

	return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_read(ndev,
			    ndev->self_mmio +
			    ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->self_mmio +
			     ndev->self_reg->db_bell);
}

static int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_set_mask(ndev, db_bits,
				ndev->self_mmio +
				ndev->self_reg->db_mask);
}

static int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_clear_mask(ndev, db_bits,
				  ndev->self_mmio +
				  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb,
				  phys_addr_t *db_addr,
				  resource_size_t *db_size)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
			    ndev->peer_reg->db_bell);
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_db_write(ndev, db_bits,
			     ndev->peer_mmio +
			     ndev->peer_reg->db_bell);
}

static int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
	return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

static int intel_ntb_spad_count(struct ntb_dev *ntb)
{
	struct intel_ntb_dev *ndev;

	ndev = container_of(ntb, struct intel_ntb_dev, ntb);

	return ndev->spad_count;
}

static u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->self_mmio +
			      ndev->self_reg->spad);
}

static int intel_ntb_spad_write(struct ntb_dev *ntb,
				int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->self_mmio +
			       ndev->self_reg->spad);
}

static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
				    phys_addr_t *spad_addr)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr,
			      ndev->peer_reg->spad);
}

static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_read(ndev, idx,
			      ndev->peer_mmio +
			      ndev->peer_reg->spad);
}

static int intel_ntb_peer_spad_write(struct ntb_dev *ntb,
				     int idx, u32 val)
{
	struct intel_ntb_dev *ndev = ntb_ndev(ntb);

	return ndev_spad_write(ndev, idx, val,
			       ndev->peer_mmio +
			       ndev->peer_reg->spad);
}

/* BWD */

static u64 bwd_db_ioread(void __iomem *mmio)
{
	return ioread64(mmio);
}

static void bwd_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite64(bits, mmio);
}

static int bwd_poll_link(struct intel_ntb_dev *ndev)
{
	u32 ntb_ctl;

	ntb_ctl = ioread32(ndev->self_mmio + BWD_NTBCNTL_OFFSET);

	if (ntb_ctl == ndev->ntb_ctl)
		return 0;

	ndev->ntb_ctl = ntb_ctl;

	ndev->lnk_sta = ioread32(ndev->self_mmio + BWD_LINK_STATUS_OFFSET);

	return 1;
}

static int bwd_link_is_up(struct intel_ntb_dev *ndev)
{
	return BWD_NTB_CTL_ACTIVE(ndev->ntb_ctl);
}

static int bwd_link_is_err(struct intel_ntb_dev *ndev)
{
	if (ioread32(ndev->self_mmio + BWD_LTSSMSTATEJMP_OFFSET)
	    & BWD_LTSSMSTATEJMP_FORCEDETECT)
		return 1;

	if (ioread32(ndev->self_mmio + BWD_IBSTERRRCRVSTS0_OFFSET)
	    & BWD_IBIST_ERR_OFLOW)
		return 1;

	return 0;
}

static inline enum ntb_topo bwd_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd)
{
	switch (ppd & BWD_PPD_TOPO_MASK) {
	case BWD_PPD_TOPO_B2B_USD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd);
		return NTB_TOPO_B2B_USD;

	case BWD_PPD_TOPO_B2B_DSD:
		dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd);
		return NTB_TOPO_B2B_DSD;

	case BWD_PPD_TOPO_PRI_USD:
	case BWD_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
	case BWD_PPD_TOPO_SEC_USD:
	case BWD_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd);
		return NTB_TOPO_NONE;
	}

	dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd);
	return NTB_TOPO_NONE;
}

static void bwd_link_hb(struct work_struct *work)
{
	struct intel_ntb_dev *ndev = hb_ndev(work);
	unsigned long poll_ts;
	void __iomem *mmio;
	u32 status32;

	poll_ts = ndev->last_ts + BWD_LINK_HB_TIMEOUT;

	/* Delay polling the link status if an interrupt was received,
	 * unless the cached link status says the link is down.
	 */
	if (time_after(poll_ts, jiffies) && bwd_link_is_up(ndev)) {
		schedule_delayed_work(&ndev->hb_timer, poll_ts - jiffies);
		return;
	}

	if (bwd_poll_link(ndev))
		ntb_link_event(&ndev->ntb);

	if (bwd_link_is_up(ndev) || !bwd_link_is_err(ndev)) {
		schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);
		return;
	}

	/* Link is down with error: recover the link! */

	mmio = ndev->self_mmio;

	/* Driver resets the NTB ModPhy lanes - magic! */
	iowrite8(0xe0, mmio + BWD_MODPHY_PCSREG6);
	iowrite8(0x40, mmio + BWD_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + BWD_MODPHY_PCSREG4);
	iowrite8(0x60, mmio + BWD_MODPHY_PCSREG6);

	/* Driver waits 100ms to allow the NTB ModPhy to settle */
	msleep(100);

	/* Clear AER Errors, write to clear */
	status32 = ioread32(mmio + BWD_ERRCORSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32);
	status32 &= PCI_ERR_COR_REP_ROLL;
	iowrite32(status32, mmio + BWD_ERRCORSTS_OFFSET);

	/* Clear unexpected electrical idle event in LTSSM, write to clear */
	status32 = ioread32(mmio + BWD_LTSSMERRSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32);
	status32 |= BWD_LTSSMERRSTS0_UNEXPECTEDEI;
	iowrite32(status32, mmio + BWD_LTSSMERRSTS0_OFFSET);

	/* Clear DeSkew Buffer error, write to clear */
	status32 = ioread32(mmio + BWD_DESKEWSTS_OFFSET);
	dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32);
	status32 |= BWD_DESKEWSTS_DBERR;
	iowrite32(status32, mmio + BWD_DESKEWSTS_OFFSET);

	status32 = ioread32(mmio + BWD_IBSTERRRCRVSTS0_OFFSET);
	dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32);
	status32 &= BWD_IBIST_ERR_OFLOW;
	iowrite32(status32, mmio + BWD_IBSTERRRCRVSTS0_OFFSET);

	/* Releases the NTB state machine to allow the link to retrain */
	status32 = ioread32(mmio + BWD_LTSSMSTATEJMP_OFFSET);
	dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32);
	status32 &= ~BWD_LTSSMSTATEJMP_FORCEDETECT;
	iowrite32(status32, mmio + BWD_LTSSMSTATEJMP_OFFSET);

	/* There is a potential race between the 2 NTB devices recovering at
	 * the same time.  If the times are the same, the link will not
	 * recover and the driver will be stuck in this loop forever.  Add a
	 * random interval to the recovery time to prevent this race.
	 */
	schedule_delayed_work(&ndev->hb_timer, BWD_LINK_RECOVERY_TIME
			      + prandom_u32() % BWD_LINK_RECOVERY_TIME);
}

static int bwd_init_isr(struct intel_ntb_dev *ndev)
{
	int rc;

	rc = ndev_init_isr(ndev, 1, BWD_DB_MSIX_VECTOR_COUNT,
			   BWD_DB_MSIX_VECTOR_SHIFT, BWD_DB_TOTAL_SHIFT);
	if (rc)
		return rc;

	/* BWD doesn't have link status interrupt, poll on that platform */
	ndev->last_ts = jiffies;
	INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_hb);
	schedule_delayed_work(&ndev->hb_timer, BWD_LINK_HB_TIMEOUT);

	return 0;
}

static void bwd_deinit_isr(struct intel_ntb_dev *ndev)
{
	cancel_delayed_work_sync(&ndev->hb_timer);
	ndev_deinit_isr(ndev);
}

static int bwd_init_ntb(struct intel_ntb_dev *ndev)
{
	ndev->mw_count = BWD_MW_COUNT;
	ndev->spad_count = BWD_SPAD_COUNT;
	ndev->db_count = BWD_DB_COUNT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &bwd_pri_reg;
		ndev->peer_reg = &bwd_b2b_reg;
		ndev->xlat_reg = &bwd_sec_xlat;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + BWD_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	return 0;
}

static int bwd_init_dev(struct intel_ntb_dev *ndev)
{
	u32 ppd;
	int rc;

	rc = pci_read_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET, &ppd);
	if (rc)
		return -EIO;

	ndev->ntb.topo = bwd_ppd_topo(ndev, ppd);
	if (ndev->ntb.topo == NTB_TOPO_NONE)
		return -EINVAL;

	rc = bwd_init_ntb(ndev);
	if (rc)
		return rc;

	rc = bwd_init_isr(ndev);
	if (rc)
		return rc;

	if (ndev->ntb.topo != NTB_TOPO_SEC) {
		/* Initiate PCI-E link training */
		rc = pci_write_config_dword(ndev->ntb.pdev, BWD_PPD_OFFSET,
					    ppd | BWD_PPD_INIT_LINK);
		if (rc)
			return rc;
	}

	return 0;
}

static void bwd_deinit_dev(struct intel_ntb_dev *ndev)
{
	bwd_deinit_isr(ndev);
}

/* SNB */

static u64 snb_db_ioread(void __iomem *mmio)
{
	return (u64)ioread16(mmio);
}

static void snb_db_iowrite(u64 bits, void __iomem *mmio)
{
	iowrite16((u16)bits, mmio);
}

static int snb_poll_link(struct intel_ntb_dev *ndev)
{
	u16 reg_val;
	int rc;

	ndev->reg->db_iowrite(ndev->db_link_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_bell);

	rc = pci_read_config_word(ndev->ntb.pdev,
				  SNB_LINK_STATUS_OFFSET, &reg_val);
	if (rc)
		return 0;

	if (reg_val == ndev->lnk_sta)
		return 0;

	ndev->lnk_sta = reg_val;

	return 1;
}

static int snb_link_is_up(struct intel_ntb_dev *ndev)
{
	return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

static inline enum ntb_topo snb_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
	switch (ppd & SNB_PPD_TOPO_MASK) {
	case SNB_PPD_TOPO_B2B_USD:
		return NTB_TOPO_B2B_USD;

	case SNB_PPD_TOPO_B2B_DSD:
		return NTB_TOPO_B2B_DSD;

	case SNB_PPD_TOPO_PRI_USD:
	case SNB_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
		return NTB_TOPO_PRI;

	case SNB_PPD_TOPO_SEC_USD:
	case SNB_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
		return NTB_TOPO_SEC;
	}

	return NTB_TOPO_NONE;
}

static inline int snb_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
	if (ppd & SNB_PPD_SPLIT_BAR_MASK) {
		dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd);
		return 1;
	}
	return 0;
}

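/*
 * Added commentary: SNB passes msix_min == msix_max below, so MSI-X is
 * all-or-nothing here: either every doorbell vector is granted or the
 * setup falls back to MSI/INTx with all doorbell bits funneled into a
 * single vector.
 */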
static int snb_init_isr(struct intel_ntb_dev *ndev)
{
	return ndev_init_isr(ndev, SNB_DB_MSIX_VECTOR_COUNT,
			     SNB_DB_MSIX_VECTOR_COUNT,
			     SNB_DB_MSIX_VECTOR_SHIFT,
			     SNB_DB_TOTAL_SHIFT);
}

static void snb_deinit_isr(struct intel_ntb_dev *ndev)
{
	ndev_deinit_isr(ndev);
}

static int snb_setup_b2b_mw(struct intel_ntb_dev *ndev,
			    const struct intel_b2b_addr *addr,
			    const struct intel_b2b_addr *peer_addr)
{
	struct pci_dev *pdev;
	void __iomem *mmio;
	resource_size_t bar_size;
	phys_addr_t bar_addr;
	int b2b_bar;
	u8 bar_sz;

	pdev = ndev_pdev(ndev);
	mmio = ndev->self_mmio;

	if (ndev->b2b_idx >= ndev->mw_count) {
		dev_dbg(ndev_dev(ndev), "not using b2b mw\n");
		b2b_bar = 0;
		ndev->b2b_off = 0;
	} else {
		b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
		if (b2b_bar < 0)
			return -EIO;

		dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar);

		bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);

		dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size);

		if (b2b_mw_share && SNB_B2B_MIN_SIZE <= bar_size >> 1) {
			dev_dbg(ndev_dev(ndev),
				"b2b using first half of bar\n");
			ndev->b2b_off = bar_size >> 1;
		} else if (SNB_B2B_MIN_SIZE <= bar_size) {
			dev_dbg(ndev_dev(ndev),
				"b2b using whole bar\n");
			ndev->b2b_off = 0;
			--ndev->mw_count;
		} else {
			dev_dbg(ndev_dev(ndev),
				"b2b bar size is too small\n");
			return -EIO;
		}
	}

	/* Reset the secondary bar sizes to match the primary bar sizes,
	 * except disable or halve the size of the b2b secondary bar.
	 *
	 * Note: code for each specific bar size register, because the register
	 * offsets are not in a consistent order (bar5sz comes after ppd, odd).
	 */
	pci_read_config_byte(pdev, SNB_PBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz);
	if (b2b_bar == 2) {
		if (ndev->b2b_off)
			bar_sz -= 1;
		else
			bar_sz = 0;
	}
	pci_write_config_byte(pdev, SNB_SBAR23SZ_OFFSET, bar_sz);
	pci_read_config_byte(pdev, SNB_SBAR23SZ_OFFSET, &bar_sz);
	dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz);

	if (!ndev->bar4_split) {
		pci_read_config_byte(pdev, SNB_PBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, SNB_SBAR45SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, SNB_SBAR45SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz);
	} else {
		pci_read_config_byte(pdev, SNB_PBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz);
		if (b2b_bar == 4) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, SNB_SBAR4SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, SNB_SBAR4SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz);

		pci_read_config_byte(pdev, SNB_PBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz);
		if (b2b_bar == 5) {
			if (ndev->b2b_off)
				bar_sz -= 1;
			else
				bar_sz = 0;
		}
		pci_write_config_byte(pdev, SNB_SBAR5SZ_OFFSET, bar_sz);
		pci_read_config_byte(pdev, SNB_SBAR5SZ_OFFSET, &bar_sz);
		dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz);
	}

	/* SBAR01 hit by first part of the b2b bar */
	if (b2b_bar == 0)
		bar_addr = addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = addr->bar5_addr32;
	else
		return -EIO;

	dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr);
	iowrite64(bar_addr, mmio + SNB_SBAR0BASE_OFFSET);

	/* Other SBAR are normally hit by the PBAR xlat, except for b2b bar.
	 * The b2b bar is either disabled above, or configured half-size, and
	 * it starts at the PBAR xlat + offset.
	 */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SNB_SBAR23BASE_OFFSET);
	bar_addr = ioread64(mmio + SNB_SBAR23BASE_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + SNB_SBAR45BASE_OFFSET);
		bar_addr = ioread64(mmio + SNB_SBAR45BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR4BASE_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR4BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR5BASE_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR5BASE_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr);
	}

	/* setup incoming bar limits == base addrs (zero length windows) */

	bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
	iowrite64(bar_addr, mmio + SNB_SBAR23LMT_OFFSET);
	bar_addr = ioread64(mmio + SNB_SBAR23LMT_OFFSET);
	dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = addr->bar4_addr64 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite64(bar_addr, mmio + SNB_SBAR45LMT_OFFSET);
		bar_addr = ioread64(mmio + SNB_SBAR45LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr);
	} else {
		bar_addr = addr->bar4_addr32 +
			(b2b_bar == 4 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR4LMT_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR4LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr);

		bar_addr = addr->bar5_addr32 +
			(b2b_bar == 5 ? ndev->b2b_off : 0);
		iowrite32(bar_addr, mmio + SNB_SBAR5LMT_OFFSET);
		bar_addr = ioread32(mmio + SNB_SBAR5LMT_OFFSET);
		dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr);
	}

	/* zero incoming translation addrs */
	iowrite64(0, mmio + SNB_SBAR23XLAT_OFFSET);

	if (!ndev->bar4_split) {
		iowrite64(0, mmio + SNB_SBAR45XLAT_OFFSET);
	} else {
		iowrite32(0, mmio + SNB_SBAR4XLAT_OFFSET);
		iowrite32(0, mmio + SNB_SBAR5XLAT_OFFSET);
	}

	/* zero outgoing translation limits (whole bar size windows) */
	iowrite64(0, mmio + SNB_PBAR23LMT_OFFSET);
	if (!ndev->bar4_split) {
		iowrite64(0, mmio + SNB_PBAR45LMT_OFFSET);
	} else {
		iowrite32(0, mmio + SNB_PBAR4LMT_OFFSET);
		iowrite32(0, mmio + SNB_PBAR5LMT_OFFSET);
	}

	/* set outgoing translation offsets */
	bar_addr = peer_addr->bar2_addr64;
	iowrite64(bar_addr, mmio + SNB_PBAR23XLAT_OFFSET);
	bar_addr = ioread64(mmio + SNB_PBAR23XLAT_OFFSET);
	dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr);

	if (!ndev->bar4_split) {
		bar_addr = peer_addr->bar4_addr64;
		iowrite64(bar_addr, mmio + SNB_PBAR45XLAT_OFFSET);
		bar_addr = ioread64(mmio + SNB_PBAR45XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr);
	} else {
		bar_addr = peer_addr->bar4_addr32;
		iowrite32(bar_addr, mmio + SNB_PBAR4XLAT_OFFSET);
		bar_addr = ioread32(mmio + SNB_PBAR4XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr);

		bar_addr = peer_addr->bar5_addr32;
		iowrite32(bar_addr, mmio + SNB_PBAR5XLAT_OFFSET);
		bar_addr = ioread32(mmio + SNB_PBAR5XLAT_OFFSET);
		dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr);
	}

	/* set the translation offset for b2b registers */
	if (b2b_bar == 0)
		bar_addr = peer_addr->bar0_addr;
	else if (b2b_bar == 2)
		bar_addr = peer_addr->bar2_addr64;
	else if (b2b_bar == 4 && !ndev->bar4_split)
		bar_addr = peer_addr->bar4_addr64;
	else if (b2b_bar == 4)
		bar_addr = peer_addr->bar4_addr32;
	else if (b2b_bar == 5)
		bar_addr = peer_addr->bar5_addr32;
	else
		return -EIO;

	/* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
	dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr);
	iowrite32(bar_addr, mmio + SNB_B2B_XLAT_OFFSETL);
	iowrite32(bar_addr >> 32, mmio + SNB_B2B_XLAT_OFFSETU);

	if (b2b_bar) {
		/* map peer ntb mmio config space registers */
		ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
					    SNB_B2B_MIN_SIZE);
		if (!ndev->peer_mmio)
			return -EIO;
	}

	return 0;
}
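
/*
 * Added commentary: with b2b_mw_share set on both sides and a b2b bar at
 * least twice SNB_B2B_MIN_SIZE, b2b_off becomes half the bar size: the
 * lower half reaches the peer's NTB registers while the upper half stays
 * usable as a memory window.  Without sharing, the whole bar is consumed
 * and mw_count is reduced by one.
 */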

static int snb_init_ntb(struct intel_ntb_dev *ndev)
{
	int rc;

	if (ndev->bar4_split)
		ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
	else
		ndev->mw_count = SNB_MW_COUNT;

	ndev->spad_count = SNB_SPAD_COUNT;
	ndev->db_count = SNB_DB_COUNT;
	ndev->db_link_mask = SNB_DB_LINK_BIT;

	switch (ndev->ntb.topo) {
	case NTB_TOPO_PRI:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Primary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &snb_pri_reg;
		ndev->peer_reg = &snb_sec_reg;
		ndev->xlat_reg = &snb_sec_xlat;
		break;

	case NTB_TOPO_SEC:
		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n");
			return -EINVAL;
		}
		/* use half the spads for the peer */
		ndev->spad_count >>= 1;
		ndev->self_reg = &snb_sec_reg;
		ndev->peer_reg = &snb_pri_reg;
		ndev->xlat_reg = &snb_pri_xlat;
		break;

	case NTB_TOPO_B2B_USD:
	case NTB_TOPO_B2B_DSD:
		ndev->self_reg = &snb_pri_reg;
		ndev->peer_reg = &snb_b2b_reg;
		ndev->xlat_reg = &snb_sec_xlat;

		if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
			ndev->peer_reg = &snb_pri_reg;

			if (b2b_mw_idx < 0)
				ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
			else
				ndev->b2b_idx = b2b_mw_idx;

			dev_dbg(ndev_dev(ndev),
				"setting up b2b mw idx %d means %d\n",
				b2b_mw_idx, ndev->b2b_idx);

		} else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
			dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n");
			ndev->db_count -= 1;
		}

		if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
			rc = snb_setup_b2b_mw(ndev,
					      &snb_b2b_dsd_addr,
					      &snb_b2b_usd_addr);
		} else {
			rc = snb_setup_b2b_mw(ndev,
					      &snb_b2b_usd_addr,
					      &snb_b2b_dsd_addr);
		}
		if (rc)
			return rc;

		/* Enable Bus Master and Memory Space on the secondary side */
		iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
			  ndev->self_mmio + SNB_SPCICMD_OFFSET);

		break;

	default:
		return -EINVAL;
	}

	ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;

	ndev->reg->db_iowrite(ndev->db_valid_mask,
			      ndev->self_mmio +
			      ndev->self_reg->db_mask);

	return 0;
}

e26a5843 1705static int snb_init_dev(struct intel_ntb_dev *ndev)
1db97f25 1706{
e26a5843
AH
1707 struct pci_dev *pdev;
1708 u8 ppd;
1709 int rc, mem;
1710
1711 /* There is a Xeon hardware errata related to writes to SDOORBELL or
1712 * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
1713 * which may hang the system. To workaround this use the second memory
1714 * window to access the interrupt and scratch pad registers on the
1715 * remote system.
1716 */
1717 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
1db97f25 1718
e26a5843
AH
1719 /* There is a hardware errata related to accessing any register in
1720 * SB01BASE in the presence of bidirectional traffic crossing the NTB.
1721 */
1722 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
1723
1724 /* HW Errata on bit 14 of b2bdoorbell register. Writes will not be
1725 * mirrored to the remote system. Shrink the number of bits by one,
1726 * since bit 14 is the last bit.
1727 */
1728 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
1db97f25 1729
e26a5843
AH
1730 ndev->reg = &snb_reg;
1731
1732 pdev = ndev_pdev(ndev);
1733
1734 rc = pci_read_config_byte(pdev, SNB_PPD_OFFSET, &ppd);
1db97f25 1735 if (rc)
e26a5843 1736 return -EIO;
1db97f25 1737
e26a5843
AH
1738 ndev->ntb.topo = snb_ppd_topo(ndev, ppd);
1739 dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd,
1740 ntb_topo_string(ndev->ntb.topo));
1741 if (ndev->ntb.topo == NTB_TOPO_NONE)
1db97f25 1742 return -EINVAL;
e26a5843
AH
1743
1744 if (ndev->ntb.topo != NTB_TOPO_SEC) {
1745 ndev->bar4_split = snb_ppd_bar4_split(ndev, ppd);
1746 dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n",
1747 ppd, ndev->bar4_split);
1748 } else {
1749 /* This is a way for transparent BAR to figure out if we are
1750 * doing split BAR or not. There is no way for the hw on the
1751 * transparent side to know and set the PPD.
1752 */
1753 mem = pci_select_bars(pdev, IORESOURCE_MEM);
1754 ndev->bar4_split = hweight32(mem) ==
1755 HSX_SPLIT_BAR_MW_COUNT + 1;
1756 dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n",
1757 mem, ndev->bar4_split);
1db97f25
DJ
1758 }
1759
e26a5843
AH
1760 rc = snb_init_ntb(ndev);
1761 if (rc)
1762 return rc;
1db97f25 1763
e26a5843
AH
1764 return snb_init_isr(ndev);
1765}
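
/*
 * Worked example for the split-BAR probe above (a sketch; assumes
 * HSX_SPLIT_BAR_MW_COUNT == 3, per the Haswell definitions in
 * ntb_hw_intel.h): pci_select_bars() returns a bitmask of the BARs backed
 * by memory resources.  With BAR4 split, the device exposes BAR0 plus the
 * three memory-window BARs {2, 4, 5}, so hweight32(mem) == 4 ==
 * HSX_SPLIT_BAR_MW_COUNT + 1, and bar4_split is detected without any help
 * from the PPD register.
 */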

static void snb_deinit_dev(struct intel_ntb_dev *ndev)
{
	snb_deinit_isr(ndev);
}

static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
{
	int rc;

	pci_set_drvdata(pdev, ndev);

	rc = pci_enable_device(pdev);
	if (rc)
		goto err_pci_enable;

	rc = pci_request_regions(pdev, NTB_NAME);
	if (rc)
		goto err_pci_regions;

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n");
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc)
			goto err_dma_mask;
		dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n");
	}

	ndev->self_mmio = pci_iomap(pdev, 0, 0);
	if (!ndev->self_mmio) {
		rc = -EIO;
		goto err_mmio;
	}
	ndev->peer_mmio = ndev->self_mmio;

	return 0;

err_mmio:
err_dma_mask:
	pci_clear_master(pdev);
	pci_release_regions(pdev);
err_pci_regions:
	pci_disable_device(pdev);
err_pci_enable:
	pci_set_drvdata(pdev, NULL);
	return rc;
}
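
/*
 * Aside (a sketch, not driver code): on kernels with the consolidated DMA
 * API, the two mask-and-fallback ladders above reduce to a single pair of
 * calls, e.g.:
 *
 *	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (rc)
 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *
 * Also note pci_iomap(pdev, 0, 0): the BAR number is 0 and a maxlen of 0
 * means "map the entire BAR".
 */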

static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
{
	struct pci_dev *pdev = ndev_pdev(ndev);

	if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
		pci_iounmap(pdev, ndev->peer_mmio);
	pci_iounmap(pdev, ndev->self_mmio);

	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
				    struct pci_dev *pdev)
{
	ndev->ntb.pdev = pdev;
	ndev->ntb.topo = NTB_TOPO_NONE;
	ndev->ntb.ops = &intel_ntb_ops;

	ndev->b2b_off = 0;
	ndev->b2b_idx = INT_MAX;

	ndev->bar4_split = 0;

	ndev->mw_count = 0;
	ndev->spad_count = 0;
	ndev->db_count = 0;
	ndev->db_vec_count = 0;
	ndev->db_vec_shift = 0;

	ndev->ntb_ctl = 0;
	ndev->lnk_sta = 0;

	ndev->db_valid_mask = 0;
	ndev->db_link_mask = 0;
	ndev->db_mask = 0;

	spin_lock_init(&ndev->db_mask_lock);
}

static int intel_ntb_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct intel_ntb_dev *ndev;
	int rc;

	if (pdev_is_bwd(pdev)) {
		ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = bwd_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else if (pdev_is_snb(pdev)) {
		ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
		if (!ndev) {
			rc = -ENOMEM;
			goto err_ndev;
		}

		ndev_init_struct(ndev, pdev);

		rc = intel_ntb_init_pci(ndev, pdev);
		if (rc)
			goto err_init_pci;

		rc = snb_init_dev(ndev);
		if (rc)
			goto err_init_dev;

	} else {
		rc = -EINVAL;
		goto err_ndev;
	}

	ndev_reset_unsafe_flags(ndev);

	ndev->reg->poll_link(ndev);

	ndev_init_debugfs(ndev);

	rc = ntb_register_device(&ndev->ntb);
	if (rc)
		goto err_register;

	return 0;

err_register:
	ndev_deinit_debugfs(ndev);
	if (pdev_is_bwd(pdev))
		bwd_deinit_dev(ndev);
	else if (pdev_is_snb(pdev))
		snb_deinit_dev(ndev);
err_init_dev:
	intel_ntb_deinit_pci(ndev);
err_init_pci:
	kfree(ndev);
err_ndev:
	return rc;
}
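
/*
 * Note on the unwind ladder above: each err_* label undoes only the steps
 * that succeeded before the failure, in reverse order of acquisition.  A
 * failure in bwd_init_dev()/snb_init_dev() lands on err_init_dev, which
 * tears down the PCI state and frees ndev without calling the device-type
 * deinit helpers; only a failed ntb_register_device() needs those.
 */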

static void intel_ntb_pci_remove(struct pci_dev *pdev)
{
	struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);

	ntb_unregister_device(&ndev->ntb);
	ndev_deinit_debugfs(ndev);
	if (pdev_is_bwd(pdev))
		bwd_deinit_dev(ndev);
	else if (pdev_is_snb(pdev))
		snb_deinit_dev(ndev);
	intel_ntb_deinit_pci(ndev);
	kfree(ndev);
}
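
/*
 * Teardown mirrors probe in reverse: unregistering from the NTB core comes
 * first, so no client callback can reach the hardware once the ISR and PCI
 * resources start being released.
 */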

static const struct intel_ntb_reg bwd_reg = {
	.poll_link = bwd_poll_link,
	.link_is_up = bwd_link_is_up,
	.db_ioread = bwd_db_ioread,
	.db_iowrite = bwd_db_iowrite,
	.db_size = sizeof(u64),
	.ntb_ctl = BWD_NTBCNTL_OFFSET,
	.mw_bar = {2, 4},
};

static const struct intel_ntb_alt_reg bwd_pri_reg = {
	.db_bell = BWD_PDOORBELL_OFFSET,
	.db_mask = BWD_PDBMSK_OFFSET,
	.spad = BWD_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg bwd_b2b_reg = {
	.db_bell = BWD_B2B_DOORBELL_OFFSET,
	.spad = BWD_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg bwd_sec_xlat = {
	/* FIXME : .bar0_base = BWD_SBAR0BASE_OFFSET, */
	/* FIXME : .bar2_limit = BWD_SBAR2LMT_OFFSET, */
	.bar2_xlat = BWD_SBAR2XLAT_OFFSET,
};

static const struct intel_ntb_reg snb_reg = {
	.poll_link = snb_poll_link,
	.link_is_up = snb_link_is_up,
	.db_ioread = snb_db_ioread,
	.db_iowrite = snb_db_iowrite,
	.db_size = sizeof(u32),
	.ntb_ctl = SNB_NTBCNTL_OFFSET,
	.mw_bar = {2, 4, 5},
};

static const struct intel_ntb_alt_reg snb_pri_reg = {
	.db_bell = SNB_PDOORBELL_OFFSET,
	.db_mask = SNB_PDBMSK_OFFSET,
	.spad = SNB_SPAD_OFFSET,
};

static const struct intel_ntb_alt_reg snb_sec_reg = {
	.db_bell = SNB_SDOORBELL_OFFSET,
	.db_mask = SNB_SDBMSK_OFFSET,
	/* second half of the scratchpads */
	.spad = SNB_SPAD_OFFSET + (SNB_SPAD_COUNT << 1),
};
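
/*
 * Worked arithmetic for .spad just above (assuming the 4-byte scratchpad
 * registers of the Xeon NTB): the secondary side owns the second half of
 * the SNB_SPAD_COUNT scratchpads, so its region begins
 * (SNB_SPAD_COUNT / 2) * 4 == SNB_SPAD_COUNT << 1 bytes past
 * SNB_SPAD_OFFSET.
 */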

static const struct intel_ntb_alt_reg snb_b2b_reg = {
	.db_bell = SNB_B2B_DOORBELL_OFFSET,
	.spad = SNB_B2B_SPAD_OFFSET,
};

static const struct intel_ntb_xlat_reg snb_pri_xlat = {
	/* Note: no primary .bar0_base visible to the secondary side.
	 *
	 * The secondary side cannot get the base address stored in primary
	 * bars.  The base address is necessary to set the limit register to
	 * any value other than zero, or unlimited.
	 *
	 * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
	 * window by setting the limit equal to base, nor can it limit the
	 * size of the memory window by setting the limit to base + size.
	 */
	.bar2_limit = SNB_PBAR23LMT_OFFSET,
	.bar2_xlat = SNB_PBAR23XLAT_OFFSET,
};
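
/*
 * Worked example of the limit-register semantics described above (a
 * sketch): for a window at base 0x8000000000 with size 0x100000, writing
 * limit = base + size bounds the window to 1MB, and writing limit = base
 * disables it.  Writing zero leaves the window unlimited, which is all the
 * secondary side can safely do here without knowing the base.
 */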

static const struct intel_ntb_xlat_reg snb_sec_xlat = {
	.bar0_base = SNB_SBAR0BASE_OFFSET,
	.bar2_limit = SNB_SBAR23LMT_OFFSET,
	.bar2_xlat = SNB_SBAR23XLAT_OFFSET,
};

static const struct intel_b2b_addr snb_b2b_usd_addr = {
	.bar2_addr64 = SNB_B2B_BAR2_USD_ADDR64,
	.bar4_addr64 = SNB_B2B_BAR4_USD_ADDR64,
	.bar4_addr32 = SNB_B2B_BAR4_USD_ADDR32,
	.bar5_addr32 = SNB_B2B_BAR5_USD_ADDR32,
};

static const struct intel_b2b_addr snb_b2b_dsd_addr = {
	.bar2_addr64 = SNB_B2B_BAR2_DSD_ADDR64,
	.bar4_addr64 = SNB_B2B_BAR4_DSD_ADDR64,
	.bar4_addr32 = SNB_B2B_BAR4_DSD_ADDR32,
	.bar5_addr32 = SNB_B2B_BAR5_DSD_ADDR32,
};

/* operations for primary side of local ntb */
static const struct ntb_dev_ops intel_ntb_ops = {
	.mw_count = intel_ntb_mw_count,
	.mw_get_range = intel_ntb_mw_get_range,
	.mw_set_trans = intel_ntb_mw_set_trans,
	.link_is_up = intel_ntb_link_is_up,
	.link_enable = intel_ntb_link_enable,
	.link_disable = intel_ntb_link_disable,
	.db_is_unsafe = intel_ntb_db_is_unsafe,
	.db_valid_mask = intel_ntb_db_valid_mask,
	.db_vector_count = intel_ntb_db_vector_count,
	.db_vector_mask = intel_ntb_db_vector_mask,
	.db_read = intel_ntb_db_read,
	.db_clear = intel_ntb_db_clear,
	.db_set_mask = intel_ntb_db_set_mask,
	.db_clear_mask = intel_ntb_db_clear_mask,
	.peer_db_addr = intel_ntb_peer_db_addr,
	.peer_db_set = intel_ntb_peer_db_set,
	.spad_is_unsafe = intel_ntb_spad_is_unsafe,
	.spad_count = intel_ntb_spad_count,
	.spad_read = intel_ntb_spad_read,
	.spad_write = intel_ntb_spad_write,
	.peer_spad_addr = intel_ntb_peer_spad_addr,
	.peer_spad_read = intel_ntb_peer_spad_read,
	.peer_spad_write = intel_ntb_peer_spad_write,
};
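
/*
 * Sketch of a client driving these ops through the include/linux/ntb.h
 * wrappers (illustration only; error handling elided):
 *
 *	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 *	ntb_spad_write(ntb, 0, 0x12345678);	// -> .spad_write
 *	ntb_peer_db_set(ntb, BIT_ULL(0));	// -> .peer_db_set
 *	val = ntb_peer_spad_read(ntb, 0);	// -> .peer_spad_read
 */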

static const struct file_operations intel_ntb_debugfs_info = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ndev_debugfs_read,
};

static const struct pci_device_id intel_ntb_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
	{0}
};
MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
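
/*
 * Reading the ID macro names above (per ntb_hw_intel.h): B2B appears to
 * mean back-to-back, PS primary side, and SS secondary side of a
 * transparent-bridge configuration, while BWD names the Atom part and
 * JSF/SNB/IVT/HSX the Xeon generations (Jasper Forest, Sandy Bridge,
 * Ivy Town, Haswell).
 */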

static struct pci_driver intel_ntb_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = intel_ntb_pci_tbl,
	.probe = intel_ntb_pci_probe,
	.remove = intel_ntb_pci_remove,
};

static int __init intel_ntb_pci_driver_init(void)
{
	if (debugfs_initialized())
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	return pci_register_driver(&intel_ntb_pci_driver);
}
module_init(intel_ntb_pci_driver_init);

static void __exit intel_ntb_pci_driver_exit(void)
{
	pci_unregister_driver(&intel_ntb_pci_driver);

	debugfs_remove_recursive(debugfs_dir);
}
module_exit(intel_ntb_pci_driver_exit);