using these calls except with such hotplug-deficient drivers.
struct platform_device *platform_device_alloc(
- char *name, unsigned id);
+ const char *name, int id);
You can use platform_device_alloc() to dynamically allocate a device, which
you will then initialize with resources and platform_device_register().
A better solution is usually:
struct platform_device *platform_device_register_simple(
- char *name, unsigned id,
- struct resource *res, unsigned nres);
+ const char *name, int id,
+ struct resource *res, unsigned int nres);
You can use platform_device_register_simple() as a one-step call to allocate
and register a device.
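A minimal sketch of the one-step call, assuming a fictitious "demo" device
with a single memory-mapped register window (the name, id and addresses are
illustrative, not from any real driver):

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/platform_device.h>

	/* illustrative resource table for the assumed "demo" device */
	static struct resource demo_resources[] = {
		{
			.start	= 0x10000000,	/* assumed register base */
			.end	= 0x10000fff,
			.flags	= IORESOURCE_MEM,
		},
	};

	static int __init demo_init(void)
	{
		struct platform_device *pdev;

		/* allocate, name and register in one step; id -1 = only instance */
		pdev = platform_device_register_simple("demo", -1,
				demo_resources, ARRAY_SIZE(demo_resources));
		return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
	}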
--- /dev/null
+NOTE:
+This is a Japanese translation of "Documentation/stable_kernel_rules.txt",
+maintained by Tsugikazu Shibata <tshibata@ab.jp.nec.com>
+and the JF Project team <www.linux.or.jp/JF>.
+If you find a difference from the original file or a problem in the
+translation, please contact the maintainer of this file or the JF project.
+
+Please also note that the purpose of this file is to make the document
+easier to read for non-native English speakers; it is not intended as a
+fork. So, if you have any comment on or update to this file, please try
+to update the original (English) file first.
+
+==================================
+This is a translation of
+linux-2.6.24/Documentation/stable_kernel_rules.txt.
+
+Translation team: JF Project < http://www.linux.or.jp/JF/ >
+Translation date: 2007/12/30
+Translator: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com>
+Proofreaders: Nobumitsu Takei <takei at webmasters dot gr dot jp>
+              Seiji Kaneko <skaneko at a2 dot mbn dot or dot jp>
+              Masanori Kobayasi <zap03216 at nifty dot ne dot jp>
+              Kenji Noguchi <tokyo246 at gmail dot com>
+              Shintaro Jingu <jin at libjingu dot jp>
+==================================
+
+Everything you ever wanted to know about Linux 2.6 -stable releases
+
+Rules on what kind of patches are accepted, and which ones are not,
+into the "-stable" tree:
+
+ - It must be obviously correct and tested.
+ - It cannot be bigger than 100 lines, including context (the lines
+   before and after the change).
+ - It must fix only one thing.
+ - It must fix a real bug that bothers people (not a "this could be a
+   problem..." type thing).
+ - It must fix a problem that causes a build error (except for things
+   marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
+   security issue, or some "oh, that's not good" issue. In short,
+   something critical.
+ - No "theoretical race condition" issues, unless an explanation of how
+   the race can be exploited is also provided.
+ - It cannot contain any "trivial" fixes (spelling changes, whitespace
+   cleanups, etc.).
+ - It must be accepted by the relevant subsystem maintainer.
+ - It must follow the rules in Documentation/SubmittingPatches.
+
+Procedure for submitting patches to the -stable tree:
+
+ - After verifying that it follows the above rules, send the patch to
+   stable@kernel.org.
+ - The sender will receive an ACK when the patch has been accepted into
+   the queue, or a NAK if it has been rejected. This response may take
+   a few days, depending on the developers' schedules.
+ - If accepted, the patch is added to the -stable queue for review by
+   other developers.
+ - Security patches should not be sent to this alias (stable@kernel.org)
+   but to the security@kernel.org address instead.
+
+Review cycle:
+
+ - When the -stable maintainers decide on a review cycle, the patches
+   are sent to the review committee and to the maintainer of the area
+   the patch affects (unless the submitter is that maintainer), and are
+   CC'd to the linux-kernel mailing list.
+ - The review committee has 48 hours in which to ACK or NAK the patch.
+ - If the patch is rejected by a committee member, or if linux-kernel
+   members object to it, raising problems that the maintainers or
+   members had not noticed, the patch is dropped from the queue.
+ - At the end of the review cycle, the ACKed patches are added to the
+   latest -stable release, and a new -stable release follows.
+ - Security patches do not go through the normal review cycle; they are
+   accepted into the -stable tree directly from the kernel security
+   team. Contact the kernel security team for details of this procedure.
+
+Review committee:
+
+ - The committee is made up of many volunteer kernel developers who are
+   active in this task, and a few non-volunteers.
+
it has been replaced by a better system and you
should be using that.
-3C359 NETWORK DRIVER
-P: Mike Phillips
-M: mikep@linuxtr.net
-L: netdev@vger.kernel.org
-W: http://www.linuxtr.net
-S: Maintained
-
3C505 NETWORK DRIVER
P: Philip Blundell
M: philb@gnu.org
S: Maintained
BONDING DRIVER
-P: Chad Tindel
-M: ctindel@users.sourceforge.net
P: Jay Vosburgh
M: fubar@us.ibm.com
L: bonding-devel@lists.sourceforge.net
W: http://sourceforge.net/projects/bonding/
S: Supported
-OLYMPIC NETWORK DRIVER
-P: Peter De Shrijver
-M: p2@ace.ulyssis.student.kuleuven.ac.be
-P: Mike Phillips
-M: mikep@linuxtr.net
-L: netdev@vger.kernel.org
-W: http://www.linuxtr.net
-S: Maintained
-
OMNIKEY CARDMAN 4000 DRIVER
P: Harald Welte
M: laforge@gnumonks.org
S: Maintained
-TOKEN-RING NETWORK DRIVER
-P: Mike Phillips
-M: mikep@linuxtr.net
-L: netdev@vger.kernel.org
-W: http://www.linuxtr.net
-S: Maintained
-
TOSHIBA ACPI EXTRAS DRIVER
P: John Belmonte
M: toshiba_acpi@memebeam.org
int device_create_file(struct device *dev, struct device_attribute *attr)
{
int error = 0;
- if (get_device(dev)) {
+ if (dev)
error = sysfs_create_file(&dev->kobj, &attr->attr);
- put_device(dev);
- }
return error;
}
*/
void device_remove_file(struct device *dev, struct device_attribute *attr)
{
- if (get_device(dev)) {
+ if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
- put_device(dev);
- }
}
/**
}
EXPORT_SYMBOL_GPL(device_create);
-/**
- * find_device - finds a device that was created with device_create()
- * @class: pointer to the struct class that this device was registered with
- * @devt: the dev_t of the device that was previously registered
- */
-static struct device *find_device(struct class *class, dev_t devt)
+static int __match_devt(struct device *dev, void *data)
{
- struct device *dev = NULL;
- struct device *dev_tmp;
+ dev_t *devt = data;
- down(&class->sem);
- list_for_each_entry(dev_tmp, &class->devices, node) {
- if (dev_tmp->devt == devt) {
- dev = dev_tmp;
- break;
- }
- }
- up(&class->sem);
- return dev;
+ return dev->devt == *devt;
}
/**
{
struct device *dev;
- dev = find_device(class, devt);
- if (dev)
+ dev = class_find_device(class, &devt, __match_devt);
+ if (dev) {
+ put_device(dev);
device_unregister(dev);
+ }
}
EXPORT_SYMBOL_GPL(device_destroy);
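class_find_device() returns its match holding a reference, which is why the
new code drops it with put_device() before unregistering: the registration
itself still holds its own reference, so the device cannot go away in
between. The lookup pattern, reduced to a sketch (major/minor here are
placeholders):

	dev_t devt = MKDEV(major, minor);	/* placeholder device number */
	struct device *dev;

	dev = class_find_device(class, &devt, __match_devt);
	if (dev) {
		put_device(dev);	/* balance class_find_device()'s get */
		device_unregister(dev);	/* drops the registration reference */
	}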
{
struct device *dev;
- dev = find_device(class, devt);
- if (dev)
+ dev = class_find_device(class, &devt, __match_devt);
+ if (dev) {
device_pm_schedule_removal(dev);
+ put_device(dev);
+ }
}
EXPORT_SYMBOL_GPL(destroy_suspended_device);
#endif /* CONFIG_PM_SLEEP */
struct driver_attribute *attr)
{
int error;
- if (get_driver(drv)) {
+ if (drv)
error = sysfs_create_file(&drv->p->kobj, &attr->attr);
- put_driver(drv);
- } else
+ else
error = -EINVAL;
return error;
}
void driver_remove_file(struct device_driver *drv,
struct driver_attribute *attr)
{
- if (get_driver(drv)) {
+ if (drv)
sysfs_remove_file(&drv->p->kobj, &attr->attr);
- put_driver(drv);
- }
}
EXPORT_SYMBOL_GPL(driver_remove_file);
list_move_tail(&dev->power.entry, &dpm_destroy);
mutex_unlock(&dpm_list_mtx);
}
+EXPORT_SYMBOL_GPL(device_pm_schedule_removal);
/**
* pm_sleep_lock - mutual exclusion for registration and suspend
extern void device_pm_add(struct device *);
extern void device_pm_remove(struct device *);
-extern void device_pm_schedule_removal(struct device *);
extern int pm_sleep_lock(void);
extern void pm_sleep_unlock(void);
vma->vm_ops = &drm_vm_dma_ops;
vma->vm_flags |= VM_RESERVED; /* Don't swap */
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
return -EINVAL; /* This should never happen. */
}
vma->vm_flags |= VM_RESERVED; /* Don't swap */
+ vma->vm_flags |= VM_DONTEXPAND;
vma->vm_file = filp; /* Needed for drm_vm_open() */
drm_vm_open_locked(vma);
vdata->refcnt = ATOMIC_INIT(1);
vma->vm_private_data = vdata;
- vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP);
+ vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
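VM_DONTEXPAND stops user space from growing these special mappings with
mremap(), which would otherwise let a process reach past the pages the
driver actually set up. A sketch of a typical mmap handler marking such a
mapping (demo_vm_ops is a hypothetical vm_operations_struct):

	static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
	{
		/* special mapping: keep it out of swap, forbid mremap() growth */
		vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
		vma->vm_ops = &demo_vm_ops;	/* hypothetical ops table */
		return 0;
	}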
* nozomi.c -- HSDPA driver Broadband Wireless Data Card - Globe Trotter
*
* Written by: Ulf Jakobsson,
- * Jan �erfeldt,
+ * Jan Åkerfeldt,
* Stefan Thomasson,
*
* Maintained by: Paul Hardwick (p.hardwick@option.com)
* --------------------------------------------------------------------------
*/
-/*
- * CHANGELOG
- * Version 2.1d
- * 11-November-2007 Jiri Slaby, Frank Seidel
- * - Big rework of multicard support by Jiri
- * - Major cleanups (semaphore to mutex, endianess, no major reservation)
- * - Optimizations
- *
- * Version 2.1c
- * 30-October-2007 Frank Seidel
- * - Completed multicard support
- * - Minor cleanups
- *
- * Version 2.1b
- * 07-August-2007 Frank Seidel
- * - Minor cleanups
- * - theoretical multicard support
- *
- * Version 2.1
- * 03-July-2006 Paul Hardwick
- *
- * - Stability Improvements. Incorporated spinlock wraps patch.
- * - Updated for newer 2.6.14+ kernels (tty_buffer_request_room)
- * - using __devexit macro for tty
- *
- *
- * Version 2.0
- * 08-feb-2006 15:34:10:Ulf
- *
- * -Fixed issue when not waking up line disipine layer, could probably result
- * in better uplink performance for 2.4.
- *
- * -Fixed issue with big endian during initalization, now proper toggle flags
- * are handled between preloader and maincode.
- *
- * -Fixed flow control issue.
- *
- * -Added support for setting DTR.
- *
- * -For 2.4 kernels, removing temporary buffer that's not needed.
- *
- * -Reading CTS only for modem port (only port that supports it).
- *
- * -Return 0 in write_room instead of netative value, it's not handled in
- * upper layer.
- *
- * --------------------------------------------------------------------------
- * Version 1.0
- *
- * First version of driver, only tested with card of type F32_2.
- * Works fine with 2.4 and 2.6 kernels.
- * Driver also support big endian architecture.
- */
-
/* Enable this to have a lot of debug printouts */
#define DEBUG
/* Do we need this settable at runtime? */
static int debug = NOZOMI_DEBUG_LEVEL;
-#define D(lvl, args...) do {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
- while (0)
+#define D(lvl, args...) do \
+ {if (lvl & debug) NFO(KERN_DEBUG, ##args); } \
+ while (0)
#define D_(lvl, args...) D(lvl, ##args)
/* These printouts are always printed */
/* Big endian */
struct toggles {
- unsigned enabled:5; /*
+ unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
- unsigned diag_dl:1;
- unsigned mdm_dl:1;
- unsigned mdm_ul:1;
+ unsigned int diag_dl:1;
+ unsigned int mdm_dl:1;
+ unsigned int mdm_ul:1;
} __attribute__ ((packed));
/* Configuration table to read at startup of card */
/* This stores all control downlink flags */
struct ctrl_dl {
u8 port;
- unsigned reserved:4;
- unsigned CTS:1;
- unsigned RI:1;
- unsigned DCD:1;
- unsigned DSR:1;
+ unsigned int reserved:4;
+ unsigned int CTS:1;
+ unsigned int RI:1;
+ unsigned int DCD:1;
+ unsigned int DSR:1;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
u8 port;
- unsigned reserved:6;
- unsigned RTS:1;
- unsigned DTR:1;
+ unsigned int reserved:6;
+ unsigned int RTS:1;
+ unsigned int DTR:1;
} __attribute__ ((packed));
#else
/* This represents the toggle information */
struct toggles {
- unsigned mdm_ul:1;
- unsigned mdm_dl:1;
- unsigned diag_dl:1;
- unsigned enabled:5; /*
+ unsigned int mdm_ul:1;
+ unsigned int mdm_dl:1;
+ unsigned int diag_dl:1;
+ unsigned int enabled:5; /*
* Toggle fields are valid if enabled is 0,
* else A-channels must always be used.
*/
/* This stores all control downlink flags */
struct ctrl_dl {
- unsigned DSR:1;
- unsigned DCD:1;
- unsigned RI:1;
- unsigned CTS:1;
- unsigned reserverd:4;
+ unsigned int DSR:1;
+ unsigned int DCD:1;
+ unsigned int RI:1;
+ unsigned int CTS:1;
+ unsigned int reserverd:4;
u8 port;
} __attribute__ ((packed));
/* This stores all control uplink flags */
struct ctrl_ul {
- unsigned DTR:1;
- unsigned RTS:1;
- unsigned reserved:6;
+ unsigned int DTR:1;
+ unsigned int RTS:1;
+ unsigned int reserved:6;
u8 port;
} __attribute__ ((packed));
#endif
} __attribute__ ((packed));
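The two variants above mirror each other because bitfield allocation order
is implementation-defined and tracks the target's byte order on Linux, so a
wire-compatible layout needs both declarations. A minimal sketch of the
usual guard, with an illustrative one-byte layout:

	#include <asm/byteorder.h>

	struct demo_flags {	/* illustrative, not a driver structure */
	#if defined(__BIG_ENDIAN_BITFIELD)
		unsigned int enabled:1;
		unsigned int reserved:7;
	#elif defined(__LITTLE_ENDIAN_BITFIELD)
		unsigned int reserved:7;
		unsigned int enabled:1;
	#endif
	} __attribute__ ((packed));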
/* Global variables */
-static struct pci_device_id nozomi_pci_tbl[] = {
+static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
{PCI_DEVICE(VENDOR1, DEVICE1)},
{},
};
* -Optimize
* -Rewrite cleaner
*/
-static u32 write_mem32(void __iomem *mem_addr_start, u32 *buf,
+static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
u32 size_bytes)
{
u32 i = 0;
u32 *ptr = (__force u32 *) mem_addr_start;
- u16 *buf16;
+ const u16 *buf16;
if (unlikely(!ptr || !buf))
return 0;
/* shortcut for extremely often used cases */
switch (size_bytes) {
case 2: /* 2 bytes */
- buf16 = (u16 *) buf;
+ buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
return 2;
break;
while (i < size_bytes) {
if (size_bytes - i == 2) {
/* 2 bytes */
- buf16 = (u16 *) buf;
+ buf16 = (const u16 *)buf;
writew(__cpu_to_le16(*buf16), (void __iomem *)ptr);
i += 2;
} else {
dc->config_table.ul_ctrl_len);
}
#else
-static __inline__ void dump_table(const struct nozomi *dc) { }
+static inline void dump_table(const struct nozomi *dc) { }
#endif
/*
/* Enable uplink interrupts */
static void enable_transmit_ul(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
+ static const u16 mask[] = {MDM_UL, DIAG_UL, APP1_UL, APP2_UL, CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
/* Disable uplink interrupts */
static void disable_transmit_ul(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
+ static const u16 mask[] =
+ {~MDM_UL, ~DIAG_UL, ~APP1_UL, ~APP2_UL, ~CTRL_UL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
/* Enable downlink interrupts */
static void enable_transmit_dl(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
+ static const u16 mask[] = {MDM_DL, DIAG_DL, APP1_DL, APP2_DL, CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier |= mask[port];
/* Disable downlink interrupts */
static void disable_transmit_dl(enum port_type port, struct nozomi *dc)
{
- u16 mask[NOZOMI_MAX_PORTS] = \
- {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
+ static const u16 mask[] =
+ {~MDM_DL, ~DIAG_DL, ~APP1_DL, ~APP2_DL, ~CTRL_DL};
if (port < NOZOMI_MAX_PORTS) {
dc->last_ier &= mask[port];
* Return 1 - send buffer to card and ack.
* Return 0 - don't ack, don't send buffer to card.
*/
-static int send_data(enum port_type index, struct nozomi *dc)
+static int send_data(enum port_type index, const struct nozomi *dc)
{
u32 size = 0;
- struct port *port = &dc->port[index];
- u8 toggle = port->toggle_ul;
+ const struct port *port = &dc->port[index];
+ const u8 toggle = port->toggle_ul;
void __iomem *addr = port->ul_addr[toggle];
- u32 ul_size = port->ul_size[toggle];
+ const u32 ul_size = port->ul_size[toggle];
struct tty_struct *tty = port->tty;
/* Get data from tty and place in buf for now */
}
/*
- * Handle donlink data, ports that are handled are modem and diagnostics
+ * Handle downlink data, ports that are handled are modem and diagnostics
* Return 1 - ok
* Return 0 - toggle fields are out of sync
*/
static ssize_t card_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%d\n", dc->card_type);
}
-static DEVICE_ATTR(card_type, 0444, card_type_show, NULL);
+static DEVICE_ATTR(card_type, S_IRUGO, card_type_show, NULL);
static ssize_t open_ttys_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
+ const struct nozomi *dc = pci_get_drvdata(to_pci_dev(dev));
return sprintf(buf, "%u\n", dc->open_ttys);
}
-static DEVICE_ATTR(open_ttys, 0444, open_ttys_show, NULL);
+static DEVICE_ATTR(open_ttys, S_IRUGO, open_ttys_show, NULL);
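S_IRUGO is simply the symbolic spelling of mode 0444 (read permission for
user, group and other); the substitution makes the read-only intent
explicit. A minimal read-only attribute in the same style, with a
placeholder value:

	static ssize_t demo_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);	/* placeholder value */
	}
	static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);

Such an attribute is then wired up with device_create_file() and removed
with device_remove_file().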
static void make_sysfs_files(struct nozomi *dc)
{
{
struct port *port = tty->driver_data;
int room = 0;
- struct nozomi *dc = get_dc_by_tty(tty);
+ const struct nozomi *dc = get_dc_by_tty(tty);
if (!dc || !port)
return 0;
/* Gets io control parameters */
static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
{
- struct port *port = tty->driver_data;
- struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
- struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
+ const struct port *port = tty->driver_data;
+ const struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
+ const struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
return (ctrl_ul->RTS ? TIOCM_RTS : 0) |
(ctrl_ul->DTR ? TIOCM_DTR : 0) |
static int ntty_cflags_changed(struct port *port, unsigned long flags,
struct async_icount *cprev)
{
- struct async_icount cnow = port->tty_icount;
+ const struct async_icount cnow = port->tty_icount;
int ret;
ret = ((flags & TIOCM_RNG) && (cnow.rng != cprev->rng)) ||
static int ntty_ioctl_tiocgicount(struct port *port, void __user *argp)
{
- struct async_icount cnow = port->tty_icount;
+ const struct async_icount cnow = port->tty_icount;
struct serial_icounter_struct icount;
icount.cts = cnow.cts;
/* just to discard single character writes */
static void ntty_put_char(struct tty_struct *tty, unsigned char c)
{
- /* FIXME !!! */
+ /*
+ * The card does not react correctly when we write single
+ * characters to it, so we discard them
+ */
DBG2("PUT CHAR Function: %c", c);
}
return rval;
}
-static struct tty_operations tty_ops = {
+static const struct tty_operations tty_ops = {
.ioctl = ntty_ioctl,
.open = ntty_open,
.close = ntty_close,
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/kthread.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
will be called smc-ultra32.
config BFIN_MAC
- tristate "Blackfin 536/537 on-chip mac support"
- depends on NET_ETHERNET && (BF537 || BF536) && (!BF537_PORT_H)
+ tristate "Blackfin 527/536/537 on-chip mac support"
+ depends on NET_ETHERNET && (BF527 || BF537 || BF536) && (!BF537_PORT_H)
select CRC32
select MII
select PHYLIB
config BFIN_MAC_USE_L1
bool "Use L1 memory for rx/tx packets"
- depends on BFIN_MAC && BF537
+ depends on BFIN_MAC && (BF527 || BF537)
default y
help
To get maximum network performance, you should use L1 memory as rx/tx buffers.
config BFIN_MAC_RMII
bool "RMII PHY Interface (EXPERIMENTAL)"
depends on BFIN_MAC && EXPERIMENTAL
- default n
+ default y if BFIN527_EZKIT
+ default n if BFIN537_STAMP
help
Use Reduced PHY MII Interface
config IBMLANA
tristate "IBM LAN Adapter/A support"
- depends on MCA && MCA_LEGACY
+ depends on MCA
---help---
This is a Micro Channel Ethernet adapter. You need to set
CONFIG_MCA to use this driver. It is both available as an in-kernel
/* Wait until PHY reset is complete */
do {
read_phy(lp->phy_address, MII_BMCR, &bmcr);
- } while (!(bmcr && BMCR_RESET));
+ } while (!(bmcr & BMCR_RESET));
disable_mdi();
spin_unlock_irq(&lp->lock);
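The bug here is logical AND versus bitwise AND: "bmcr && BMCR_RESET" is
true whenever both operands are nonzero, so the old loop never actually
tested the reset bit. The bitwise form isolates bit 15:

	#include <linux/mii.h>

	u16 bmcr = 0x2100;		  /* 100 Mb/s full duplex, bit 15 clear */
	int a = (bmcr && BMCR_RESET);	  /* 1: both operands are nonzero */
	int b = (bmcr & BMCR_RESET) != 0; /* 0: BMCR_RESET (0x8000) not set */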
static void ax_reset_8390(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
unsigned long reset_start_time = jiffies;
void __iomem *addr = (void __iomem *)dev->base_addr;
if (ei_debug > 1)
- printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
+ dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
if (jiffies - reset_start_time > 2*HZ/100) {
- printk(KERN_WARNING "%s: %s did not complete.\n",
+ dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
__FUNCTION__, dev->name);
break;
}
int ring_page)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in %s [DMAstat:%d][irqlock:%d].\n",
+ dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+ "[DMAstat:%d][irqlock:%d].\n",
dev->name, __FUNCTION__,
- ei_status.dmaing, ei_status.irqlock);
+ ei_status.dmaing, ei_status.irqlock);
return;
}
struct sk_buff *skb, int ring_offset)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
char *buf = skb->data;
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in ax_block_input "
+ dev_err(&ax->dev->dev,
+ "%s: DMAing conflict in %s "
"[DMAstat:%d][irqlock:%d].\n",
- dev->name, ei_status.dmaing, ei_status.irqlock);
+ dev->name, __FUNCTION__,
+ ei_status.dmaing, ei_status.irqlock);
return;
}
const unsigned char *buf, const int start_page)
{
struct ei_device *ei_local = netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
void __iomem *nic_base = ei_local->mem;
unsigned long dma_start;
/* This *shouldn't* happen. If it does, it's the last thing you'll see */
if (ei_status.dmaing) {
- printk(KERN_EMERG "%s: DMAing conflict in %s."
+ dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
"[DMAstat:%d][irqlock:%d]\n",
dev->name, __FUNCTION__,
ei_status.dmaing, ei_status.irqlock);
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
if (jiffies - dma_start > 2*HZ/100) { /* 20ms */
- printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
+ dev_warn(&ax->dev->dev,
+ "%s: timeout waiting for Tx RDC.\n", dev->name);
ax_reset_8390(dev);
ax_NS8390_init(dev,1);
break;
ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
{
struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+ struct ax_device *ax = to_ax_dev(dev);
unsigned long flags;
- printk(KERN_DEBUG "%s: %p, %04x, %04x %04x\n",
- __FUNCTION__, dev, phy_addr, reg, value);
+ dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
+ __FUNCTION__, dev, phy_addr, reg, value);
spin_lock_irqsave(&ei->page_lock, flags);
ax_NS8390_init(dev, 0);
if (first_init) {
- printk("AX88796: %dbit, irq %d, %lx, MAC: ",
- ei_status.word16 ? 16:8, dev->irq, dev->base_addr);
-
- for (i = 0; i < ETHER_ADDR_LEN; i++)
- printk("%2.2x%c", dev->dev_addr[i],
- (i < (ETHER_ADDR_LEN-1) ? ':' : ' '));
+ DECLARE_MAC_BUF(mac);
- printk("\n");
+ dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %s\n",
+ ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
+ print_mac(mac, dev->dev_addr));
}
ret = register_netdev(dev);
/*
- * File: drivers/net/bfin_mac.c
- * Based on:
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
+ * Blackfin On-Chip MAC Driver
*
- * Original author:
- * Luke Yang <luke.yang@analog.com>
+ * Copyright 2004-2007 Analog Devices Inc.
*
- * Created:
- * Description:
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#include <linux/init.h>
#define DRV_NAME "bfin_mac"
#define DRV_VERSION "1.1"
#define DRV_AUTHOR "Bryan Wu, Luke Yang"
-#define DRV_DESC "Blackfin BF53[67] on-chip Ethernet MAC driver"
+#define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver"
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
/* poll the STABUSY bit */
while ((bfin_read_EMAC_STAADD()) & STABUSY) {
- mdelay(10);
+ udelay(1);
if (timeout_cnt-- < 0) {
printk(KERN_ERR DRV_NAME
": wait MDC/MDIO transaction to complete timeout\n");
spin_unlock_irqrestore(&lp->lock, flags);
}
+/* MDC = 2.5 MHz */
+#define MDC_CLK 2500000
+
static int mii_probe(struct net_device *dev)
{
struct bf537mac_local *lp = netdev_priv(dev);
struct phy_device *phydev = NULL;
unsigned short sysctl;
int i;
+ u32 sclk, mdc_div;
/* Enable PHY output early */
if (!(bfin_read_VR_CTL() & PHYCLKOE))
bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
- /* MDC = 2.5 MHz */
+ sclk = get_sclk();
+ mdc_div = ((sclk / MDC_CLK) / 2) - 1;
+
sysctl = bfin_read_EMAC_SYSCTL();
- sysctl |= SET_MDCDIV(24);
+ sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
bfin_write_EMAC_SYSCTL(sysctl);
/* search for connect PHY device */
lp->phydev = phydev;
printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n",
- DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq);
+ "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
+ "@sclk=%dMHz)\n",
+ DRV_NAME, phydev->drv->name, phydev->dev.bus_id, phydev->irq,
+ MDC_CLK, mdc_div, sclk/1000000);
return 0;
}
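Instead of the hard-coded SET_MDCDIV(24), the divider is now derived from
the actual system clock, with the MDC frequency coming out to
sclk / (2 * (mdc_div + 1)). Worked through with an assumed 100 MHz sclk:

	/* worked example, assuming get_sclk() returns 100 MHz */
	u32 sclk = 100000000;
	u32 mdc_div = ((sclk / MDC_CLK) / 2) - 1;  /* (40 / 2) - 1 = 19 */
	/* resulting MDC: 100 MHz / (2 * (19 + 1)) = 2.5 MHz */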
*/
if (current_tx_ptr->next->next == tx_list_head) {
while (tx_list_head->status.status_word == 0) {
- mdelay(10);
+ mdelay(1);
if (tx_list_head->status.status_word != 0
|| !(bfin_read_DMA2_IRQ_STATUS() & 0x08)) {
goto adjust_head;
current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+ /* Invalidate the data cache for the skb->data range when the
+ * cache is write-back; this prevents stale cache lines from
+ * overwriting the new data written by DMA
+ */
+ blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+ (unsigned long)new_skb->end);
+
len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
skb_put(skb, len);
blackfin_dcache_invalidate_range((unsigned long)skb->head,
#if defined(CONFIG_BFIN_MAC_RMII)
opmode |= RMII; /* For Now only 100MBit are supported */
-#ifdef CONFIG_BF_REV_0_2
+#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
opmode |= TE;
#endif
#endif
netif_wake_queue(dev);
}
+static void bf537mac_multicast_hash(struct net_device *dev)
+{
+ u32 emac_hashhi, emac_hashlo;
+ struct dev_mc_list *dmi = dev->mc_list;
+ char *addrs;
+ int i;
+ u32 crc;
+
+ emac_hashhi = emac_hashlo = 0;
+
+ for (i = 0; i < dev->mc_count; i++) {
+ addrs = dmi->dmi_addr;
+ dmi = dmi->next;
+
+ /* skip non-multicast addresses */
+ if (!(*addrs & 1))
+ continue;
+
+ crc = ether_crc(ETH_ALEN, addrs);
+ crc >>= 26;
+
+ if (crc & 0x20)
+ emac_hashhi |= 1 << (crc & 0x1f);
+ else
+ emac_hashlo |= 1 << (crc & 0x1f);
+ }
+
+ bfin_write_EMAC_HASHHI(emac_hashhi);
+ bfin_write_EMAC_HASHLO(emac_hashlo);
+
+ return;
+}
+
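Each multicast address is folded down to the top six bits of its Ethernet
CRC: bit 5 of that value picks the high or low 32-bit hash register and the
remaining five bits pick the bit within it, so any number of groups fits in
64 filter bits (imperfect matches are rejected further up the stack). The
per-address step in isolation (addr, hashhi and hashlo stand in for the
function's locals):

	u32 crc = ether_crc(ETH_ALEN, addr) >> 26;	/* top 6 CRC bits */
	if (crc & 0x20)
		hashhi |= 1 << (crc & 0x1f);	/* EMAC_HASHHI: bits 32..63 */
	else
		hashlo |= 1 << (crc & 0x1f);	/* EMAC_HASHLO: bits 0..31  */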
/*
* This routine will, depending on the values passed to it,
* either make it accept multicast packets, go into
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= RAF;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI) {
/* accept all multicast */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
+ } else if (dev->mc_count) {
+ /* set up multicast hash table */
+ sysctl = bfin_read_EMAC_OPMODE();
+ sysctl |= HM;
+ bfin_write_EMAC_OPMODE(sysctl);
+ bf537mac_multicast_hash(dev);
} else {
/* clear promisc or multicast mode */
sysctl = bfin_read_EMAC_OPMODE();
return retval;
phy_start(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
setup_system_regs(dev);
bf537mac_disable();
bf537mac_enable();
-
pr_debug("hardware init finished\n");
netif_start_queue(dev);
netif_carrier_on(dev);
netif_carrier_off(dev);
phy_stop(lp->phydev);
+ phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
/* clear everything */
bf537mac_shutdown(dev);
/* register irq handler */
if (request_irq
(IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
- "BFIN537_MAC_RX", dev)) {
+ "EMAC_RX", dev)) {
printk(KERN_WARNING DRV_NAME
": Unable to attach BlackFin MAC RX interrupt\n");
return -EBUSY;
/*
- * File: drivers/net/bfin_mac.c
- * Based on:
- * Maintainer:
- * Bryan Wu <bryan.wu@analog.com>
+ * Blackfin On-Chip MAC Driver
*
- * Original author:
- * Luke Yang <luke.yang@analog.com>
+ * Copyright 2004-2007 Analog Devices Inc.
*
- * Created:
- * Description:
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * Modified:
- * Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software ; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation ; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY ; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program ; see the file COPYING.
- * If not, write to the Free Software Foundation,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Licensed under the GPL-2 or later.
*/
#define BFIN_MAC_CSUM_OFFLOAD
dev_set_allmulti(slave_dev, 1);
}
+ netif_tx_lock_bh(bond_dev);
/* upload master's mc_list to new slave */
for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
dev_mc_add (slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
}
+ netif_tx_unlock_bh(bond_dev);
}
if (bond->params.mode == BOND_MODE_8023AD) {
}
/* flush master's mc_list from slave */
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_tx_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
}
/* flush master's mc_list from slave */
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
+ netif_tx_unlock_bh(bond_dev);
}
netdev_set_master(slave_dev, NULL);
}
if (do_failover) {
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
}
re_arm:
slave->link = BOND_LINK_UP;
- rtnl_lock();
-
write_lock_bh(&bond->curr_slave_lock);
if ((!bond->curr_active_slave) &&
}
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
}
} else {
read_lock(&bond->curr_slave_lock);
bond->dev->name,
slave->dev->name);
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
bond->current_arp_slave = slave;
if (slave) {
bond->primary_slave->dev->name);
/* primary is up so switch to it */
- rtnl_lock();
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, bond->primary_slave);
write_unlock_bh(&bond->curr_slave_lock);
- rtnl_unlock();
-
slave = bond->primary_slave;
slave->jiffies = jiffies;
} else {
{
struct bonding *bond = bond_dev->priv;
struct net_device_stats *stats = &(bond->stats), *sstats;
+ struct net_device_stats local_stats;
struct slave *slave;
int i;
- memset(stats, 0, sizeof(struct net_device_stats));
+ memset(&local_stats, 0, sizeof(struct net_device_stats));
read_lock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
sstats = slave->dev->get_stats(slave->dev);
- stats->rx_packets += sstats->rx_packets;
- stats->rx_bytes += sstats->rx_bytes;
- stats->rx_errors += sstats->rx_errors;
- stats->rx_dropped += sstats->rx_dropped;
+ local_stats.rx_packets += sstats->rx_packets;
+ local_stats.rx_bytes += sstats->rx_bytes;
+ local_stats.rx_errors += sstats->rx_errors;
+ local_stats.rx_dropped += sstats->rx_dropped;
- stats->tx_packets += sstats->tx_packets;
- stats->tx_bytes += sstats->tx_bytes;
- stats->tx_errors += sstats->tx_errors;
- stats->tx_dropped += sstats->tx_dropped;
+ local_stats.tx_packets += sstats->tx_packets;
+ local_stats.tx_bytes += sstats->tx_bytes;
+ local_stats.tx_errors += sstats->tx_errors;
+ local_stats.tx_dropped += sstats->tx_dropped;
- stats->multicast += sstats->multicast;
- stats->collisions += sstats->collisions;
+ local_stats.multicast += sstats->multicast;
+ local_stats.collisions += sstats->collisions;
- stats->rx_length_errors += sstats->rx_length_errors;
- stats->rx_over_errors += sstats->rx_over_errors;
- stats->rx_crc_errors += sstats->rx_crc_errors;
- stats->rx_frame_errors += sstats->rx_frame_errors;
- stats->rx_fifo_errors += sstats->rx_fifo_errors;
- stats->rx_missed_errors += sstats->rx_missed_errors;
+ local_stats.rx_length_errors += sstats->rx_length_errors;
+ local_stats.rx_over_errors += sstats->rx_over_errors;
+ local_stats.rx_crc_errors += sstats->rx_crc_errors;
+ local_stats.rx_frame_errors += sstats->rx_frame_errors;
+ local_stats.rx_fifo_errors += sstats->rx_fifo_errors;
+ local_stats.rx_missed_errors += sstats->rx_missed_errors;
- stats->tx_aborted_errors += sstats->tx_aborted_errors;
- stats->tx_carrier_errors += sstats->tx_carrier_errors;
- stats->tx_fifo_errors += sstats->tx_fifo_errors;
- stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
- stats->tx_window_errors += sstats->tx_window_errors;
+ local_stats.tx_aborted_errors += sstats->tx_aborted_errors;
+ local_stats.tx_carrier_errors += sstats->tx_carrier_errors;
+ local_stats.tx_fifo_errors += sstats->tx_fifo_errors;
+ local_stats.tx_heartbeat_errors += sstats->tx_heartbeat_errors;
+ local_stats.tx_window_errors += sstats->tx_window_errors;
}
+ memcpy(stats, &local_stats, sizeof(struct net_device_stats));
+
read_unlock_bh(&bond->lock);
return stats;
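The rework accumulates into a stack-local struct and publishes it with one
final memcpy(), so a concurrent reader of bond->stats no longer sees the
transient zeros that the old up-front memset() of the shared struct could
expose. The shape of the pattern, stripped to its essentials:

	struct net_device_stats local_stats;

	memset(&local_stats, 0, sizeof(local_stats));
	/* ... sum each slave's counters into local_stats ... */
	memcpy(stats, &local_stats, sizeof(local_stats));	/* publish once */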
struct bonding *bond = bond_dev->priv;
struct dev_mc_list *dmi;
- write_lock_bh(&bond->lock);
-
/*
* Do promisc before checking multicast_mode
*/
bond_set_allmulti(bond, -1);
}
+ read_lock(&bond->lock);
+
bond->flags = bond_dev->flags;
/* looking for addresses to add to slaves' mc list */
bond_mc_list_destroy(bond);
bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
- write_unlock_bh(&bond->lock);
+ read_unlock(&bond->lock);
}
/*
struct net_device *bond_dev = bond->dev;
bond_work_cancel_all(bond);
+ netif_tx_lock_bh(bond_dev);
bond_mc_list_destroy(bond);
+ netif_tx_unlock_bh(bond_dev);
/* Release the bonded slaves */
bond_release_all(bond_dev);
bond_deinit(bond_dev);
int bond_parse_parm(const char *buf, struct bond_parm_tbl *tbl)
{
int mode = -1, i, rv;
- char modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
+ char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
- rv = sscanf(buf, "%d", &mode);
- if (!rv) {
+ for (p = (char *)buf; *p; p++)
+ if (!(isdigit(*p) || isspace(*p)))
+ break;
+
+ if (*p)
rv = sscanf(buf, "%20s", modestr);
- if (!rv)
- return -1;
- }
+ else
+ rv = sscanf(buf, "%d", &mode);
+
+ if (!rv)
+ return -1;
for (i = 0; tbl[i].modename; i++) {
if (mode == tbl[i].mode)
down_write(&bonding_rwsem);
/* Check to see if the bond already exists. */
- list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
- if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
- printk(KERN_ERR DRV_NAME
+ if (name) {
+ list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list)
+ if (strnicmp(bond->dev->name, name, IFNAMSIZ) == 0) {
+ printk(KERN_ERR DRV_NAME
": cannot add bond %s; it already exists\n",
- name);
- res = -EPERM;
- goto out_rtnl;
- }
+ name);
+ res = -EPERM;
+ goto out_rtnl;
+ }
+ }
bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
ether_setup);
#include "bond_3ad.h"
#include "bond_alb.h"
-#define DRV_VERSION "3.2.3"
-#define DRV_RELDATE "December 6, 2007"
+#define DRV_VERSION "3.2.4"
+#define DRV_RELDATE "January 28, 2008"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
-void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
+void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
{
#define K * 1024
* defaults for the assorted SGE parameters, which admins can change until
* they are used to initialize the SGE.
*/
-void __devinit t3_sge_prep(struct adapter *adap, struct sge_params *p)
+void t3_sge_prep(struct adapter *adap, struct sge_params *p)
{
int i;
V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
-static void __devinit init_mtus(unsigned short mtus[])
+static void init_mtus(unsigned short mtus[])
{
/*
* See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
/*
* Initial congestion control parameters.
*/
-static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
a[9] = 2;
* Determines a card's PCI mode and associated parameters, such as speed
* and width.
*/
-static void __devinit get_pci_mode(struct adapter *adapter,
- struct pci_params *p)
+static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
static unsigned short speed_map[] = { 33, 66, 100, 133 };
u32 pci_mode, pcie_cap;
* capabilities and default speed/duplex/flow-control/autonegotiation
* settings.
*/
-static void __devinit init_link_config(struct link_config *lc,
- unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
lc->requested_speed = lc->speed = SPEED_INVALID;
* Calculates the size of an MC7 memory in bytes from the value of its
* configuration register.
*/
-static unsigned int __devinit mc7_calc_size(u32 cfg)
+static unsigned int mc7_calc_size(u32 cfg)
{
unsigned int width = G_WIDTH(cfg);
unsigned int banks = !!(cfg & F_BKS) + 1;
return MBs << 20;
}
-static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
- unsigned int base_addr, const char *name)
+static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
+ unsigned int base_addr, const char *name)
{
u32 cfg;
return 0;
}
-static int __devinit init_parity(struct adapter *adap)
+static int init_parity(struct adapter *adap)
{
int i, err, addr;
* for some adapter tunables, take PHYs out of reset, and initialize the MDIO
* interface.
*/
-int __devinit t3_prep_adapter(struct adapter *adapter,
- const struct adapter_info *ai, int reset)
+int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
+ int reset)
{
int ret;
unsigned int i, j = 0;
* enabled. 82557 pads with 7Eh, while the later controllers pad
* with 00h.
*
- * IV. Recieve
+ * IV. Receive
*
* The Receive Frame Area (RFA) comprises a ring of Receive Frame
* Descriptors (RFD) + data buffer, thus forming the simplified mode
* and Rx indication and re-allocation happen in the same context,
* therefore no locking is required. A software-generated interrupt
* is generated from the watchdog to recover from a failed allocation
- * senario where all Rx resources have been indicated and none re-
+ * scenario where all Rx resources have been indicated and none re-
* placed.
*
* V. Miscellaneous
/* Quadwords to DMA into FIFO before starting frame transmit */
nic->tx_threshold = 0xE0;
- /* no interrupt for every tx completion, delay = 256us if not 557*/
+ /* no interrupt for every tx completion, delay = 256us if not 557 */
nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
&s->complete;
/* Device's stats reporting may take several microseconds to
- * complete, so where always waiting for results of the
+ * complete, so we're always waiting for results of the
* previous command. */
if(*complete == cpu_to_le32(cuc_dump_reset_complete)) {
if(restart_required) {
// ack the rnr?
- writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
+ iowrite8(stat_ack_rnr, &nic->csr->scb.stat_ack);
e100_start_receiver(nic, nic->rx_to_clean);
if(work_done)
(*work_done)++;
struct nic *nic = netdev_priv(netdev);
unregister_netdev(netdev);
e100_free(nic);
- iounmap(nic->csr);
+ pci_iounmap(pdev, nic->csr);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
/**
* e100_io_error_detected - called when PCI error is detected.
* @pdev: Pointer to PCI device
- * @state: The current pci conneection state
+ * @state: The current pci connection state
*/
static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
- /* Similar to calling e100_down(), but avoids adpater I/O. */
+ /* Similar to calling e100_down(), but avoids adapter I/O. */
netdev->stop(netdev);
- /* Detach; put netif into state similar to hotplug unplug. */
+ /* Detach; put netif into a state similar to hotplug unplug. */
napi_enable(&nic->napi);
netif_device_detach(netdev);
pci_disable_device(pdev);
/**
* Dump the eeprom for users having checksum issues
**/
-void e1000_dump_eeprom(struct e1000_adapter *adapter)
+static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ethtool_eeprom eeprom;
#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */
#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */
/* Extended Device Control */
#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */
return err;
}
-bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
- int reg, int offset, u32 mask, u32 write)
+static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
+ int reg, int offset, u32 mask, u32 write)
{
int i;
u32 read;
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY | WAKE_ARP;
/* apply any specific unsupported masks here */
if (adapter->flags & FLAG_NO_WAKE_UCAST) {
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
+ if (adapter->wol & E1000_WUFC_ARP)
+ wol->wolopts |= WAKE_ARP;
}
static int e1000_set_wol(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & WAKE_MAGICSECURE)
return -EOPNOTSUPP;
if (!(adapter->flags & FLAG_HAS_WOL))
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
+ if (wol->wolopts & WAKE_ARP)
+ adapter->wol |= E1000_WUFC_ARP;
return 0;
}
int irq_flags = IRQF_SHARED;
int err;
- err = pci_enable_msi(adapter->pdev);
- if (err) {
- ndev_warn(netdev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
- } else {
+ if (!pci_enable_msi(adapter->pdev)) {
adapter->flags |= FLAG_MSI_ENABLED;
handler = e1000_intr_msi;
irq_flags = 0;
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
+ ndev_err(netdev,
+ "Unable to allocate %s interrupt (return: %d)\n",
+ adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx",
+ err);
if (adapter->flags & FLAG_MSI_ENABLED)
pci_disable_msi(adapter->pdev);
- ndev_err(netdev,
- "Unable to allocate interrupt Error: %d\n", err);
}
return err;
int ehea_sense_port_attr(struct ehea_port *port);
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
+extern u64 ehea_driver_flags;
+extern struct work_struct ehea_rereg_mr_task;
+
#endif /* __EHEA_H__ */
return ret;
if (netif_carrier_ok(dev)) {
- switch(port->port_speed) {
+ switch (port->port_speed) {
case EHEA_SPEED_10M: cmd->speed = SPEED_10; break;
case EHEA_SPEED_100M: cmd->speed = SPEED_100; break;
case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break;
goto doit;
}
- switch(cmd->speed) {
+ switch (cmd->speed) {
case SPEED_10:
if (cmd->duplex == DUPLEX_FULL)
sp = H_SPEED_10M_F;
#ifndef __EHEA_HW_H__
#define __EHEA_HW_H__
-#define QPX_SQA_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48,63)
-#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48,63)
+#define QPX_SQA_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ1A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ2A_VALUE EHEA_BMASK_IBM(48, 63)
+#define QPX_RQ3A_VALUE EHEA_BMASK_IBM(48, 63)
#define QPTEMM_OFFSET(x) offsetof(struct ehea_qptemm, x)
* (C) Copyright IBM Corp. 2006
*
* Authors:
- * Christoph Raisch <raisch@de.ibm.com>
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Thomas Klein <tklein@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
-static int use_mcs = 0;
-static int use_lro = 0;
+static int use_mcs;
+static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
-static int prop_carrier_state = 0;
+static int prop_carrier_state;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
"Default = 0");
-static int port_name_cnt = 0;
+static int port_name_cnt;
static LIST_HEAD(adapter_list);
-u64 ehea_driver_flags = 0;
+u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
struct semaphore dlpar_mem_lock;
.remove = ehea_remove,
};
-void ehea_dump(void *adr, int len, char *msg) {
+void ehea_dump(void *adr, int len, char *msg)
+{
int x;
unsigned char *deb = adr;
for (x = 0; x < len; x += 16) {
printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
- deb, x, *((u64*)&deb[0]), *((u64*)&deb[8]));
+ deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
deb += 16;
}
}
last_wqe_index = wqe_index;
rmb();
if (!ehea_check_cqe(cqe, &rq)) {
- if (rq == 1) { /* LL RQ1 */
+ if (rq == 1) {
+ /* LL RQ1 */
skb = get_skb_by_index_ll(skb_arr_rq1,
skb_arr_rq1_len,
wqe_index);
if (!skb)
break;
}
- skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
+ skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
ehea_fill_skb(dev, skb, cqe);
- } else if (rq == 2) { /* RQ2 */
+ } else if (rq == 2) {
+ /* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
skb_arr_rq2_len, cqe);
if (unlikely(!skb)) {
}
ehea_fill_skb(dev, skb, cqe);
processed_rq2++;
- } else { /* RQ3 */
+ } else {
+ /* RQ3 */
skb = get_skb_by_index(skb_arr_rq3,
skb_arr_rq3_len, cqe);
if (unlikely(!skb)) {
unsigned long flags;
cqe = ehea_poll_cq(send_cq);
- while(cqe && (quota > 0)) {
+ while (cqe && (quota > 0)) {
ehea_inc_cq(send_cq);
cqe_counter++;
static int ehea_poll(struct napi_struct *napi, int budget)
{
- struct ehea_port_res *pr = container_of(napi, struct ehea_port_res, napi);
+ struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
+ napi);
struct net_device *dev = pr->port->netdev;
struct ehea_cqe *cqe;
struct ehea_cqe *cqe_skb = NULL;
u64 hret;
struct hcp_ehea_port_cb0 *cb0;
- cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC); /* May be called via */
- if (!cb0) { /* ehea_neq_tasklet() */
+ /* may be called via ehea_neq_tasklet() */
+ cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
goto out;
/* MAC address */
port->mac_addr = cb0->port_mac_addr << 16;
- if (!is_valid_ether_addr((u8*)&port->mac_addr)) {
+ if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
ret = -EADDRNOTAVAIL;
goto out_free;
}
static void ehea_neq_tasklet(unsigned long data)
{
- struct ehea_adapter *adapter = (struct ehea_adapter*)data;
+ struct ehea_adapter *adapter = (struct ehea_adapter *)data;
struct ehea_eqe *eqe;
u64 event_mask;
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
- int arr_size = sizeof(void*) * max_q_entries;
+ int arr_size = sizeof(void *) * max_q_entries;
q_skba->arr = vmalloc(arr_size);
if (!q_skba->arr)
nfrags = skb_shinfo(skb)->nr_frags;
sg1entry = &swqe->u.immdata_desc.sg_entry;
- sg_list = (struct ehea_vsgentry*)&swqe->u.immdata_desc.sg_list;
+ sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
swqe->descriptors = 0;
sg1entry_contains_frag_data = 0;
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
ehea_error("%sregistering bc address failed (tagged)",
- hcallid == H_REG_BCMC ? "" : "de");
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
}
}
-static void ehea_add_multicast_entry(struct ehea_port* port, u8* mc_mac_addr)
+static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
struct ehea_mc_list *ehea_mcl_entry;
u64 hret;
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list;
- i < dev->mc_count;
- i++, k_mcl_entry = k_mcl_entry->next) {
+ for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+ k_mcl_entry = k_mcl_entry->next)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
- }
+
}
out:
return;
if ((skb->protocol == htons(ETH_P_IP)) &&
(ip_hdr(skb)->protocol == IPPROTO_TCP)) {
- tcp = (struct tcphdr*)(skb_network_header(skb) + (ip_hdr(skb)->ihl * 4));
+ tcp = (struct tcphdr *)(skb_network_header(skb) +
+ (ip_hdr(skb)->ihl * 4));
tmp = (tcp->source + (tcp->dest << 16)) % 31;
tmp += ip_hdr(skb)->daddr % 31;
return tmp % num_qps;
- }
- else
+ } else
return 0;
}
u64 hret;
u16 dummy16 = 0;
u64 dummy64 = 0;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!cb0) {
int ret = 0;
int i;
- for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
ret |= ehea_clean_portres(port, &port->port_res[i]);
ret |= ehea_destroy_eq(port->qp_eq);
goto out_clean_pr;
}
- for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
if (ret) {
ehea_error("activate_qp failed");
}
}
- for(i = 0; i < port->num_def_qps; i++) {
+ for (i = 0; i < port->num_def_qps; i++) {
ret = ehea_fill_port_res(&port->port_res[i]);
if (ret) {
ehea_error("out_free_irqs");
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
int ret = -EIO;
int dret;
int i;
return ret;
}
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res * pr)
+void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
struct ehea_qp qp = *orig_qp;
struct ehea_qp_init_attr *init_attr = &qp.init_attr;
int ret = 0;
int i;
- struct hcp_modify_qp_cb0* cb0;
+ struct hcp_modify_qp_cb0 *cb0;
u64 hret;
u64 dummy64 = 0;
u16 dummy16 = 0;
of_node_put(port->ofdev.node);
}
-static int ehea_driver_sysfs_add(struct device *dev,
- struct device_driver *driver)
-{
- int ret;
-
- ret = sysfs_create_link(&driver->kobj, &dev->kobj,
- kobject_name(&dev->kobj));
- if (ret == 0) {
- ret = sysfs_create_link(&dev->kobj, &driver->kobj,
- "driver");
- if (ret)
- sysfs_remove_link(&driver->kobj,
- kobject_name(&dev->kobj));
- }
- return ret;
-}
-
-static void ehea_driver_sysfs_remove(struct device *dev,
- struct device_driver *driver)
-{
- struct device_driver *drv = driver;
-
- if (drv) {
- sysfs_remove_link(&drv->kobj, kobject_name(&dev->kobj));
- sysfs_remove_link(&dev->kobj, "driver");
- }
-}
-
static struct device *ehea_register_port(struct ehea_port *port,
struct device_node *dn)
{
goto out_unreg_of_dev;
}
- ret = ehea_driver_sysfs_add(&port->ofdev.dev, &ehea_driver.driver);
- if (ret) {
- ehea_error("failed to register sysfs driver link");
- goto out_rem_dev_file;
- }
-
return &port->ofdev.dev;
-out_rem_dev_file:
- device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
out_unreg_of_dev:
of_device_unregister(&port->ofdev);
out:
static void ehea_unregister_port(struct ehea_port *port)
{
- ehea_driver_sysfs_remove(&port->ofdev.dev, &ehea_driver.driver);
device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
of_device_unregister(&port->ofdev);
}
of_node_put(eth_dn);
if (port) {
- for (i=0; i < EHEA_MAX_PORTS; i++)
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
if (!adapter->port[i]) {
adapter->port[i] = port;
break;
ehea_shutdown_single_port(port);
- for (i=0; i < EHEA_MAX_PORTS; i++)
+ for (i = 0; i < EHEA_MAX_PORTS; i++)
if (adapter->port[i] == port) {
adapter->port[i] = NULL;
break;
}
static struct notifier_block ehea_reboot_nb = {
- .notifier_call = ehea_reboot_notifier,
+ .notifier_call = ehea_reboot_notifier,
};
static int check_module_parm(void)
* (C) Copyright IBM Corp. 2006
*
* Authors:
- * Christoph Raisch <raisch@de.ibm.com>
- * Jan-Bernd Themann <themann@de.ibm.com>
- * Thomas Klein <tklein@de.ibm.com>
+ * Christoph Raisch <raisch@de.ibm.com>
+ * Jan-Bernd Themann <themann@de.ibm.com>
+ * Thomas Klein <tklein@de.ibm.com>
*
*
* This program is free software; you can redistribute it and/or modify
}
/* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP 1
-#define H_ALL_RES_TYPE_CQ 2
-#define H_ALL_RES_TYPE_EQ 3
-#define H_ALL_RES_TYPE_MR 5
-#define H_ALL_RES_TYPE_MW 6
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
static long ehea_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
const u64 qp_handle, const u64 sel_mask, void *cb_addr)
{
return ehea_plpar_hcall_norets(H_QUERY_HEA_QP,
- adapter_handle, /* R4 */
- qp_category, /* R5 */
- qp_handle, /* R6 */
- sel_mask, /* R7 */
+ adapter_handle, /* R4 */
+ qp_category, /* R5 */
+ qp_handle, /* R6 */
+ sel_mask, /* R7 */
virt_to_abs(cb_addr), /* R8 */
0, 0);
}
/* input param R5 */
-#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
-#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
-#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
-#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
-#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_QP_EQPO EHEA_BMASK_IBM(9, 11)
+#define H_ALL_RES_QP_QPP EHEA_BMASK_IBM(12, 12)
+#define H_ALL_RES_QP_RQR EHEA_BMASK_IBM(13, 15)
+#define H_ALL_RES_QP_EQEG EHEA_BMASK_IBM(16, 16)
+#define H_ALL_RES_QP_LL_QP EHEA_BMASK_IBM(17, 17)
+#define H_ALL_RES_QP_DMA128 EHEA_BMASK_IBM(19, 19)
+#define H_ALL_RES_QP_HSM EHEA_BMASK_IBM(20, 21)
+#define H_ALL_RES_QP_SIGT EHEA_BMASK_IBM(22, 23)
+#define H_ALL_RES_QP_TENURE EHEA_BMASK_IBM(48, 55)
+#define H_ALL_RES_QP_RES_TYP EHEA_BMASK_IBM(56, 63)
/* input param R9 */
-#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32,63)
+#define H_ALL_RES_QP_TOKEN EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_PD EHEA_BMASK_IBM(32, 63)
/* input param R10 */
-#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
-#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
-#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
-#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
+#define H_ALL_RES_QP_MAX_SWQE EHEA_BMASK_IBM(4, 7)
+#define H_ALL_RES_QP_MAX_R1WQE EHEA_BMASK_IBM(12, 15)
+#define H_ALL_RES_QP_MAX_R2WQE EHEA_BMASK_IBM(20, 23)
+#define H_ALL_RES_QP_MAX_R3WQE EHEA_BMASK_IBM(28, 31)
/* Max Send Scatter Gather Elements */
-#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
-#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
+#define H_ALL_RES_QP_MAX_SSGE EHEA_BMASK_IBM(37, 39)
+#define H_ALL_RES_QP_MAX_R1SGE EHEA_BMASK_IBM(45, 47)
/* Max Receive SG Elements RQ1 */
-#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
-#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
+#define H_ALL_RES_QP_MAX_R2SGE EHEA_BMASK_IBM(53, 55)
+#define H_ALL_RES_QP_MAX_R3SGE EHEA_BMASK_IBM(61, 63)
/* input param R11 */
-#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_SWQE_IDL EHEA_BMASK_IBM(0, 7)
/* max swqe immediate data length */
-#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_PORT_NUM EHEA_BMASK_IBM(48, 63)
/* input param R12 */
-#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_TH_RQ2 EHEA_BMASK_IBM(0, 15)
/* Threshold RQ2 */
-#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_TH_RQ3 EHEA_BMASK_IBM(16, 31)
/* Threshold RQ3 */
/* output param R6 */
-#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
-#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_ACT_SWQE EHEA_BMASK_IBM(0, 15)
+#define H_ALL_RES_QP_ACT_R1WQE EHEA_BMASK_IBM(16, 31)
+#define H_ALL_RES_QP_ACT_R2WQE EHEA_BMASK_IBM(32, 47)
+#define H_ALL_RES_QP_ACT_R3WQE EHEA_BMASK_IBM(48, 63)
/* output param, R7 */
-#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
-#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
-#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
+#define H_ALL_RES_QP_ACT_SSGE EHEA_BMASK_IBM(0, 7)
+#define H_ALL_RES_QP_ACT_R1SGE EHEA_BMASK_IBM(8, 15)
+#define H_ALL_RES_QP_ACT_R2SGE EHEA_BMASK_IBM(16, 23)
+#define H_ALL_RES_QP_ACT_R3SGE EHEA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_ACT_SWQE_IDL EHEA_BMASK_IBM(32, 39)
/* output param R8,R9 */
-#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SIZE_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SIZE_RQ3 EHEA_BMASK_IBM(32, 63)
/* output param R11,R12 */
-#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_SQ EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ1 EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_LIOBN_RQ2 EHEA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_LIOBN_RQ3 EHEA_BMASK_IBM(32, 63)
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr, const u32 pd,
}
/* Defines for H_CALL H_ALLOC_RESOURCE */
-#define H_ALL_RES_TYPE_QP 1
-#define H_ALL_RES_TYPE_CQ 2
-#define H_ALL_RES_TYPE_EQ 3
-#define H_ALL_RES_TYPE_MR 5
-#define H_ALL_RES_TYPE_MW 6
+#define H_ALL_RES_TYPE_QP 1
+#define H_ALL_RES_TYPE_CQ 2
+#define H_ALL_RES_TYPE_EQ 3
+#define H_ALL_RES_TYPE_MR 5
+#define H_ALL_RES_TYPE_MW 6
/* input param R5 */
-#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
+#define H_ALL_RES_EQ_NEQ EHEA_BMASK_IBM(0, 0)
#define H_ALL_RES_EQ_NON_NEQ_ISN EHEA_BMASK_IBM(6, 7)
#define H_ALL_RES_EQ_INH_EQE_GEN EHEA_BMASK_IBM(16, 16)
-#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
+#define H_ALL_RES_EQ_RES_TYPE EHEA_BMASK_IBM(56, 63)
/* input param R6 */
-#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_MAX_EQE EHEA_BMASK_IBM(32, 63)
/* output param R6 */
-#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_LIOBN EHEA_BMASK_IBM(32, 63)
/* output param R7 */
-#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_EQE EHEA_BMASK_IBM(32, 63)
/* output param R8 */
-#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
+#define H_ALL_RES_EQ_ACT_PS EHEA_BMASK_IBM(32, 63)
/* output param R9 */
#define H_ALL_RES_EQ_ACT_EQ_IST_C EHEA_BMASK_IBM(30, 31)
hret = ehea_plpar_hcall9(H_REGISTER_SMR,
outs,
- adapter_handle , /* R4 */
- orig_mr_handle, /* R5 */
- vaddr_in, /* R6 */
- (((u64)access_ctrl) << 32ULL), /* R7 */
- pd, /* R8 */
- 0, 0, 0, 0); /* R9-R12 */
+ adapter_handle, /* R4 */
+ orig_mr_handle, /* R5 */
+ vaddr_in, /* R6 */
+ (((u64)access_ctrl) << 32ULL), /* R7 */
+ pd, /* R8 */
+ 0, 0, 0, 0); /* R9-R12 */
mr->handle = outs[0];
mr->lkey = (u32)outs[2];
u64 outs[PLPAR_HCALL9_BUFSIZE];
return ehea_plpar_hcall9(H_DISABLE_AND_GET_HEA,
- outs,
+ outs,
adapter_handle, /* R4 */
H_DISABLE_GET_EHEA_WQE_P, /* R5 */
qp_handle, /* R6 */
- 0, 0, 0, 0, 0, 0); /* R7-R12 */
+ 0, 0, 0, 0, 0, 0); /* R7-R12 */
}
u64 ehea_h_free_resource(const u64 adapter_handle, const u64 res_handle,
{
return ehea_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle, /* R4 */
- res_handle, /* R5 */
+ res_handle, /* R5 */
force_bit,
- 0, 0, 0, 0); /* R7-R10 */
+ 0, 0, 0, 0); /* R7-R10 */
}
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u32 pd, u64 *mr_handle, u32 *lkey)
{
u64 hret;
- u64 outs[PLPAR_HCALL9_BUFSIZE];
+ u64 outs[PLPAR_HCALL9_BUFSIZE];
hret = ehea_plpar_hcall9(H_ALLOC_HEA_RESOURCE,
outs,
adapter_handle, /* R4 */
5, /* R5 */
- vaddr, /* R6 */
+ vaddr, /* R6 */
length, /* R7 */
(((u64) access_ctrl) << 32ULL), /* R8 */
pd, /* R9 */
void *rblock)
{
return ehea_plpar_hcall_norets(H_ERROR_DATA,
- adapter_handle, /* R4 */
- ressource_handle, /* R5 */
- virt_to_abs(rblock), /* R6 */
- 0, 0, 0, 0); /* R7-R12 */
+ adapter_handle, /* R4 */
+ ressource_handle, /* R5 */
+ virt_to_abs(rblock), /* R6 */
+ 0, 0, 0, 0); /* R7-R12 */
}
static inline void hcp_epas_dtor(struct h_epas *epas)
{
if (epas->kernel.addr)
- iounmap((void __iomem*)((u64)epas->kernel.addr & PAGE_MASK));
+ iounmap((void __iomem *)((u64)epas->kernel.addr & PAGE_MASK));
epas->user.addr = 0;
epas->kernel.addr = 0;
const u64 qp_handle,
const u64 sel_mask,
void *cb_addr,
- u64 * inv_attr_id,
- u64 * proc_mask, u16 * out_swr, u16 * out_rwr);
+ u64 *inv_attr_id,
+ u64 *proc_mask, u16 *out_swr, u16 *out_rwr);
u64 ehea_h_alloc_resource_eq(const u64 adapter_handle,
- struct ehea_eq_attr *eq_attr, u64 * eq_handle);
+ struct ehea_eq_attr *eq_attr, u64 *eq_handle);
u64 ehea_h_alloc_resource_cq(const u64 adapter_handle,
struct ehea_cq_attr *cq_attr,
- u64 * cq_handle, struct h_epas *epas);
+ u64 *cq_handle, struct h_epas *epas);
u64 ehea_h_alloc_resource_qp(const u64 adapter_handle,
struct ehea_qp_init_attr *init_attr,
const u32 pd,
- u64 * qp_handle, struct h_epas *h_epas);
+ u64 *qp_handle, struct h_epas *h_epas);
-#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48,55)
-#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62,63)
+#define H_REG_RPAGE_PAGE_SIZE EHEA_BMASK_IBM(48, 55)
+#define H_REG_RPAGE_QT EHEA_BMASK_IBM(62, 63)
u64 ehea_h_register_rpage(const u64 adapter_handle,
const u8 pagesize,
u64 ehea_h_alloc_resource_mr(const u64 adapter_handle, const u64 vaddr,
const u64 length, const u32 access_ctrl,
- const u32 pd, u64 * mr_handle, u32 * lkey);
+ const u32 pd, u64 *mr_handle, u32 *lkey);
u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
const u8 pagesize, const u8 queue_type,
u64 ehea_h_query_ehea(const u64 adapter_handle, void *cb_addr);
/* output param R5 */
-#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40,47)
-#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48,63)
+#define H_MEHEAPORT_CAT EHEA_BMASK_IBM(40, 47)
+#define H_MEHEAPORT_PN EHEA_BMASK_IBM(48, 63)
u64 ehea_h_query_ehea_port(const u64 adapter_handle, const u16 port_num,
const u8 cb_cat, const u64 select_mask,
struct ehea_busmap ehea_bmap = { 0, 0, NULL };
-extern u64 ehea_driver_flags;
-extern struct work_struct ehea_rereg_mr_task;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
}
queue->queue_length = nr_of_pages * pagesize;
- queue->queue_pages = kmalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
+ queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
ehea_error("no mem for queue_pages");
return -ENOMEM;
*/
i = 0;
while (i < nr_of_pages) {
- u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
+ u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
if (!kpage)
goto out_nomem;
for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
- (queue->queue_pages)[i] = (struct ehea_page*)kpage;
+ (queue->queue_pages)[i] = (struct ehea_page *)kpage;
kpage += pagesize;
i++;
}
return 0;
hcp_epas_dtor(&cq->epas);
-
- if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(cq->adapter, cq->fw_handle);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
}
if (i == (eq->attr.nr_pages - 1)) {
/* last page */
vpage = hw_qpageit_get_inc(&eq->hw_queue);
- if ((hret != H_SUCCESS) || (vpage)) {
+ if ((hret != H_SUCCESS) || (vpage))
goto out_kill_hwq;
- }
} else {
- if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
+ if ((hret != H_PAGE_REGISTERED) || (!vpage))
goto out_kill_hwq;
- }
}
}
unsigned long flags;
spin_lock_irqsave(&eq->spinlock, flags);
- eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
+ eqe = (struct ehea_eqe *)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
spin_unlock_irqrestore(&eq->spinlock, flags);
return eqe;
hcp_epas_dtor(&eq->epas);
- if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(eq->adapter, eq->fw_handle);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
}
hcp_epas_dtor(&qp->epas);
- if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+ hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
+ if (hret == H_R_STATE) {
ehea_error_data(qp->adapter, qp->fw_handle);
hret = ehea_destroy_qp_res(qp, FORCE_FREE);
}
return 0;
}
-int ehea_create_busmap( void )
+int ehea_create_busmap(void)
{
u64 vaddr = EHEA_BUSMAP_START;
unsigned long high_section_index = 0;
return 0;
}
-void ehea_destroy_busmap( void )
+void ehea_destroy_busmap(void)
{
vfree(ehea_bmap.vaddr);
}
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
-#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
-#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
+#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
/* Some abbreviations used here:
u64 entry;
};
-#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52,63)
-#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0,7)
+#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52, 63)
+#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0, 7)
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
- u32 qe = *(u8*)retvalue;
+ u32 qe = *(u8 *)retvalue;
if ((qe >> 7) == (queue->toggle_state & 1))
hw_qeit_eq_get_inc(queue);
else
int ehea_destroy_cq(struct ehea_cq *cq);
-struct ehea_qp *ehea_create_qp(struct ehea_adapter * adapter, u32 pd,
+struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
struct ehea_qp_init_attr *init_attr);
int ehea_destroy_qp(struct ehea_qp *qp);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
-int ehea_create_busmap( void );
-void ehea_destroy_busmap( void );
+int ehea_create_busmap(void);
+void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);
#endif /* __EHEA_QMR_H__ */
* Copyright (C) 2004 Andrew de Quincey (wol support)
* Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
* IRQ rate fixes, bigendian fixes, cleanups, verification)
- * Copyright (c) 2004,5,6 NVIDIA Corporation
+ * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c
- NvRegMacReset = 0x3c,
+ NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_MCASTADDRA_FORCE 0x01
NvRegMulticastAddrB = 0xB4,
NvRegMulticastMaskA = 0xB8,
+#define NVREG_MCASTMASKA_NONE 0xffffffff
NvRegMulticastMaskB = 0xBC,
+#define NVREG_MCASTMASKB_NONE 0xffff
NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
NvRegTxRingPhysAddrHigh = 0x148,
NvRegRxRingPhysAddrHigh = 0x14C,
NvRegTxPauseFrame = 0x170,
-#define NVREG_TX_PAUSEFRAME_DISABLE 0x1ff0080
-#define NVREG_TX_PAUSEFRAME_ENABLE 0x0c00030
+#define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010
NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NV_RX_AVAIL (1<<31)
#define NV_RX2_CHECKSUMMASK (0x1C000000)
-#define NV_RX2_CHECKSUMOK1 (0x10000000)
-#define NV_RX2_CHECKSUMOK2 (0x14000000)
-#define NV_RX2_CHECKSUMOK3 (0x18000000)
+#define NV_RX2_CHECKSUM_IP (0x10000000)
+#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
+#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
goto next_pkt;
}
}
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /* ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
- (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- }
} else {
dev_kfree_skb(skb);
goto next_pkt;
}
}
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK2)/*ip and tcp */ {
+ if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /* ip and tcp */
+ ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /* ip and udp */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- if ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK1 ||
- (flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUMOK3) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- }
/* got a valid packet - forward it to the network core */
skb_put(skb, len);
addr[1] = alwaysOn[1];
mask[0] = alwaysOn[0] | alwaysOff[0];
mask[1] = alwaysOn[1] | alwaysOff[1];
+ } else {
+ mask[0] = NVREG_MCASTMASKA_NONE;
+ mask[1] = NVREG_MCASTMASKB_NONE;
}
}
addr[0] |= NVREG_MCASTADDRA_FORCE;
nv_mac_reset(dev);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(0, base + NvRegPacketFilterFlags);
writel(0, base + NvRegTransmitterControl);
spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
writel(0, base + NvRegMulticastAddrB);
- writel(0, base + NvRegMulticastMaskA);
- writel(0, base + NvRegMulticastMaskB);
+ writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
+ writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
/* One manual link speed update: Interrupts are enabled, future link
* speed changes cause interrupts and are handled by nv_link_irq().
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP77 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{ /* MCP79 Ethernet Controller */
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
- .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT,
+ .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR,
},
{0,},
};
0x0000
};
-static char *ibmlana_adapter_names[] __initdata = {
+static char *ibmlana_adapter_names[] __devinitdata = {
"IBM LAN Adapter/A",
NULL
};
-static int ibmlana_init_one(struct device *kdev)
+static int __devinit ibmlana_init_one(struct device *kdev)
{
struct mca_device *mdev = to_mca_device(kdev);
struct net_device *dev;
if (adapter->msix_entries) {
err = igb_request_msix(adapter);
if (!err) {
- struct e1000_hw *hw = &adapter->hw;
/* enable IAM, auto-mask,
* DO NOT USE EIAME or IAME in legacy mode */
wr32(E1000_IAM, IMS_ENABLE_MASK);
return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}
-static int __devinit macb_probe(struct platform_device *pdev)
+static int __init macb_probe(struct platform_device *pdev)
{
struct eth_platform_data *pdata;
struct resource *regs;
return err;
}
-static int __devexit macb_remove(struct platform_device *pdev)
+static int __exit macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
}
static struct platform_driver macb_driver = {
- .probe = macb_probe,
- .remove = __devexit_p(macb_remove),
+ .remove = __exit_p(macb_remove),
.driver = {
.name = "macb",
},
static int __init macb_init(void)
{
- return platform_driver_register(&macb_driver);
+ return platform_driver_probe(&macb_driver, macb_probe);
}
static void __exit macb_exit(void)
* for more details.
*/
-#define DEBUG
-
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/simint.h>
-#include "mipsnet.h" /* actual device IO mapping */
+#define MIPSNET_VERSION "2007-11-17"
+
+/*
+ * Net status/control block as seen by sw in the core.
+ */
+struct mipsnet_regs {
+ /*
+ * Device info for probing, reads as MIPSNET%d where %d is some
+ * form of version.
+ */
+ u64 devId; /*0x00 */
-#define MIPSNET_VERSION "2005-06-20"
+ /*
+ * Read-only busy flag.
+ * Set and cleared by the Net Device to indicate that an rx or a tx
+ * is in progress.
+ */
+ u32 busy; /*0x08 */
-#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field))
+ /*
+ * Set by the Net Device.
+ * The device will set it once data has been received.
+ * The value is the number of bytes that should be read from
+ * rxDataBuffer. The value decreases towards 0 as the data is
+ * read out of rxDataBuffer.
+ */
+ u32 rxDataCount; /*0x0c */
+#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16)
+
+ /*
+ * Settable from the MIPS core, cleared by the Net Device.
+ * The core should set the number of bytes it wants to send,
+ * then it should write those bytes of data to txDataBuffer.
+ * The device will clear txDataCount once the data has been
+ * processed (not necessarily sent).
+ */
+ u32 txDataCount; /*0x10 */
+
+ /*
+ * Interrupt control
+ *
+ * Used to clear the interrupt generated by this dev.
+ * Write a 1 to clear the interrupt (except bit31).
+ *
+ * Bit0 is set if it was a tx-done interrupt.
+ * Bit1 is set when new rx-data is available.
+ * Until this bit is cleared there will be no other RXs.
+ *
+ * Bit31 is used for testing, it clears after a read.
+ * Writing 1 to this bit will cause an interrupt to be generated.
+ * To clear the test interrupt, write 0 to this register.
+ */
+ u32 interruptControl; /*0x14 */
+#define MIPSNET_INTCTL_TXDONE (1u << 0)
+#define MIPSNET_INTCTL_RXDONE (1u << 1)
+#define MIPSNET_INTCTL_TESTBIT (1u << 31)
+
+ /*
+ * Read-only core-specific interrupt info for the device to signal
+ * the core. The meaning of the contents of this field might change.
+ */
+ /* XXX: the whole memIntf interrupt scheme is messy: the device
+ * should have no control whatsoever of what VPE/register set is
+ * being used.
+ * The MemIntf should only expose interrupt lines, and something in
+ * the config should be responsible for the line<->core/vpe bindings.
+ */
+ u32 interruptInfo; /*0x18 */
+
+ /*
+ * This is where the received data is read out.
+ * There is more data to read while rxDataCount is non-zero.
+ * Only 1 byte at this regs offset is used.
+ */
+ u32 rxDataBuffer; /*0x1c */
+
+ /*
+ * This is where the data to transmit is written.
+ * Data should be written for the amount specified in the
+ * txDataCount register.
+ * Only 1 byte at this regs offset is used.
+ */
+ u32 txDataBuffer; /*0x20 */
+};
+
+#define regaddr(dev, field) \
+ (dev->base_addr + offsetof(struct mipsnet_regs, field))
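The regaddr() helper above turns a field of struct mipsnet_regs into the corresponding I/O port address. A minimal sketch (illustrative only, not part of the patch; it reuses the inl() port accessor and the devId layout from the old MIPS_NET_DEV_ID definition) of checking the device signature:

	/* Hypothetical helper: read the low 32 bits of devId (offset 0x00). */
	static int mipsnet_check_devid(struct net_device *dev)
	{
		u32 id = inl(regaddr(dev, devId));

		/* 'M','I','P','S' assembled little-endian in the low word,
		 * as in the old MIPS_NET_DEV_ID definition. */
		return (id == 0x5350494d) ? 0 : -ENODEV;
	}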
static char mipsnet_string[] = "mipsnet";
static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
int len)
{
- uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
-
- if (available_len < len)
- return -EFAULT;
-
for (; len > 0; len--, kdata++)
- *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer));
+ *kdata = inb(regaddr(dev, rxDataBuffer));
- return inl(mipsnet_reg_address(dev, rxDataCount));
+ return inl(regaddr(dev, rxDataCount));
}
-static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
+static inline void mipsnet_put_todevice(struct net_device *dev,
struct sk_buff *skb)
{
int count_to_go = skb->len;
char *buf_ptr = skb->data;
- outl(skb->len, mipsnet_reg_address(dev, txDataCount));
+ outl(skb->len, regaddr(dev, txDataCount));
for (; count_to_go; buf_ptr++, count_to_go--)
- outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
+ outb(*buf_ptr, regaddr(dev, txDataBuffer));
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- return skb->len;
+ dev_kfree_skb(skb);
}
static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
+static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len)
{
struct sk_buff *skb;
- size_t len = count;
- skb = alloc_skb(len + 2, GFP_KERNEL);
+ if (!len)
+ return len;
+
+ skb = dev_alloc_skb(len + NET_IP_ALIGN);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
}
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
return -EFAULT;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- return count;
+ return len;
}
static irqreturn_t mipsnet_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
-
- irqreturn_t retval = IRQ_NONE;
- uint64_t interruptFlags;
-
- if (irq == dev->irq) {
- retval = IRQ_HANDLED;
-
- interruptFlags =
- inl(mipsnet_reg_address(dev, interruptControl));
-
- if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
- outl(MIPSNET_INTCTL_TXDONE,
- mipsnet_reg_address(dev, interruptControl));
- /* only one packet at a time, we are done. */
- netif_wake_queue(dev);
- } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
- mipsnet_get_fromdev(dev,
- inl(mipsnet_reg_address(dev, rxDataCount)));
- outl(MIPSNET_INTCTL_RXDONE,
- mipsnet_reg_address(dev, interruptControl));
-
- } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
- /*
- * TESTBIT is cleared on read.
- * And takes effect after a write with 0
- */
- outl(0, mipsnet_reg_address(dev, interruptControl));
- } else {
- /* Maybe shared IRQ, just ignore, no clearing. */
- retval = IRQ_NONE;
- }
-
- } else {
- printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
- dev->name, __FUNCTION__, irq);
- retval = IRQ_NONE;
+ u32 int_flags;
+ irqreturn_t ret = IRQ_NONE;
+
+ if (irq != dev->irq)
+ goto out_badirq;
+
+ /* TESTBIT is cleared on read. */
+ int_flags = inl(regaddr(dev, interruptControl));
+ if (int_flags & MIPSNET_INTCTL_TESTBIT) {
+ /* TESTBIT takes effect after a write with 0. */
+ outl(0, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_TXDONE) {
+ /* Only one packet at a time, we are done. */
+ dev->stats.tx_packets++;
+ netif_wake_queue(dev);
+ outl(MIPSNET_INTCTL_TXDONE,
+ regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
+ } else if (int_flags & MIPSNET_INTCTL_RXDONE) {
+ mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount)));
+ outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl));
+ ret = IRQ_HANDLED;
}
- return retval;
+ return ret;
+
+out_badirq:
+ printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
+ dev->name, __FUNCTION__, irq);
+ return ret;
}
static int mipsnet_open(struct net_device *dev)
err = request_irq(dev->irq, &mipsnet_interrupt,
IRQF_SHARED, dev->name, (void *) dev);
-
if (err) {
- release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
return err;
}
netif_start_queue(dev);
/* test interrupt handler */
- outl(MIPSNET_INTCTL_TESTBIT,
- mipsnet_reg_address(dev, interruptControl));
-
+ outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl));
return 0;
}
static int mipsnet_close(struct net_device *dev)
{
netif_stop_queue(dev);
-
+ free_irq(dev->irq, dev);
return 0;
}
*/
netdev->base_addr = 0x4200;
netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 +
- inl(mipsnet_reg_address(netdev, interruptInfo));
+ inl(regaddr(netdev, interruptInfo));
/* Get the io region now, get irq on open() */
- if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
+ if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs),
+ "mipsnet")) {
err = -EBUSY;
goto out_free_netdev;
}
return 0;
out_free_region:
- release_region(netdev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(netdev->base_addr, sizeof(struct mipsnet_regs));
out_free_netdev:
free_netdev(netdev);
struct net_device *dev = dev_get_drvdata(device);
unregister_netdev(dev);
- release_region(dev->base_addr, MIPSNET_IO_EXTENT);
+ release_region(dev->base_addr, sizeof(struct mipsnet_regs));
free_netdev(dev);
dev_set_drvdata(device, NULL);
+++ /dev/null
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __MIPSNET_H
-#define __MIPSNET_H
-
-/*
- * Id of this Net device, as seen by the core.
- */
-#define MIPS_NET_DEV_ID ((uint64_t) \
- ((uint64_t) 'M' << 0)| \
- ((uint64_t) 'I' << 8)| \
- ((uint64_t) 'P' << 16)| \
- ((uint64_t) 'S' << 24)| \
- ((uint64_t) 'N' << 32)| \
- ((uint64_t) 'E' << 40)| \
- ((uint64_t) 'T' << 48)| \
- ((uint64_t) '0' << 56))
-
-/*
- * Net status/control block as seen by sw in the core.
- * (Why not use bit fields? can't be bothered with cross-platform struct
- * packing.)
- */
-struct net_control_block {
- /*
- * dev info for probing
- * reads as MIPSNET%d where %d is some form of version
- */
- uint64_t devId; /* 0x00 */
-
- /*
- * read only busy flag.
- * Set and cleared by the Net Device to indicate that an rx or a tx
- * is in progress.
- */
- uint32_t busy; /* 0x08 */
-
- /*
- * Set by the Net Device.
- * The device will set it once data has been received.
- * The value is the number of bytes that should be read from
- * rxDataBuffer. The value will decrease till 0 until all the data
- * from rxDataBuffer has been read.
- */
- uint32_t rxDataCount; /* 0x0c */
-#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
-
- /*
- * Settable from the MIPS core, cleared by the Net Device. The core
- * should set the number of bytes it wants to send, then it should
- * write those bytes of data to txDataBuffer. The device will clear
- * txDataCount has been processed (not necessarily sent).
- */
- uint32_t txDataCount; /* 0x10 */
-
- /*
- * Interrupt control
- *
- * Used to clear the interrupted generated by this dev.
- * Write a 1 to clear the interrupt. (except bit31).
- *
- * Bit0 is set if it was a tx-done interrupt.
- * Bit1 is set when new rx-data is available.
- * Until this bit is cleared there will be no other RXs.
- *
- * Bit31 is used for testing, it clears after a read.
- * Writing 1 to this bit will cause an interrupt to be generated.
- * To clear the test interrupt, write 0 to this register.
- */
- uint32_t interruptControl; /*0x14 */
-#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0))
-#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1))
-#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31))
-#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \
- MIPSNET_INTCTL_RXDONE | \
- MIPSNET_INTCTL_TESTBIT)
-
- /*
- * Readonly core-specific interrupt info for the device to signal the
- * core. The meaning of the contents of this field might change.
- *
- * TODO: the whole memIntf interrupt scheme is messy: the device should
- * have no control what so ever of what VPE/register set is being
- * used. The MemIntf should only expose interrupt lines, and
- * something in the config should be responsible for the
- * line<->core/vpe bindings.
- */
- uint32_t interruptInfo; /* 0x18 */
-
- /*
- * This is where the received data is read out.
- * There is more data to read until rxDataReady is 0.
- * Only 1 byte at this regs offset is used.
- */
- uint32_t rxDataBuffer; /* 0x1c */
-
- /*
- * This is where the data to transmit is written. Data should be
- * written for the amount specified in the txDataCount register. Only
- * 1 byte at this regs offset is used.
- */
- uint32_t txDataBuffer; /* 0x20 */
-};
-
-#define MIPSNET_IO_EXTENT 0x40 /* being generous */
-
-#define field_offset(field) (offsetof(struct net_control_block, field))
-
-#endif /* __MIPSNET_H */
IIId. Synchronization
Most operations are synchronized on the np->lock irq spinlock, except the
-performance critical codepaths:
-
-The rx process only runs in the interrupt handler. Access from outside
-the interrupt handler is only permitted after disable_irq().
-
-The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
-is set, then access is permitted under spin_lock_irq(&np->lock).
-
-Thus configuration functions that want to access everything must call
- disable_irq(dev->irq);
- netif_tx_lock_bh(dev);
- spin_lock_irq(&np->lock);
-
-IV. Notes
-
-NatSemi PCI network controllers are very uncommon.
+receive and transmit paths, which are synchronized using a combination of
+hardware descriptor ownership, interrupt disabling and NAPI poll scheduling.
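As a concrete sketch of that rule (illustrative only, not code from this patch; names assumed from the driver's conventions, e.g. np = netdev_priv(dev)), a configuration path that must quiesce both data paths would do:

	napi_disable(&np->napi);	/* stop the NAPI poll (rx/tx reap) */
	netif_tx_lock_bh(dev);		/* shut out the transmit path */
	spin_lock_irq(&np->lock);	/* everything else */
	/* ... reconfigure ... */
	spin_unlock_irq(&np->lock);
	netif_tx_unlock_bh(dev);
	napi_enable(&np->napi);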
IVb. References
#define LRO_MAX_AGGR 64
+#define PE_MIN_MTU 64
+#define PE_MAX_MTU 1500
+#define PE_DEF_MTU ETH_DATA_LEN
+
#define DEFAULT_MSG_ENABLE \
(NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
& ((ring)->size - 1))
#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring))
-#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
-
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");
return -1;
}
+static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags &= ~PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
+static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
+{
+ unsigned int flags;
+
+ flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
+ flags |= PAS_MAC_CFG_PCFG_PE;
+ write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
+}
+
static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
struct pci_dev *pdev = mac->pdev;
return 0;
}
+static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ unsigned int adr0, adr1;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ adr0 = dev->dev_addr[2] << 24 |
+ dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 |
+ dev->dev_addr[5];
+ adr1 = read_mac_reg(mac, PAS_MAC_CFG_ADR1);
+ adr1 &= ~0xffff;
+ adr1 |= dev->dev_addr[0] << 8 | dev->dev_addr[1];
+
+ pasemi_mac_intf_disable(mac);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR0, adr0);
+ write_mac_reg(mac, PAS_MAC_CFG_ADR1, adr1);
+ pasemi_mac_intf_enable(mac);
+
+ return 0;
+}
+
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
void **tcph, u64 *hdr_flags, void *data)
{
}
-static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac)
{
struct pasemi_mac_rxring *rx = rx_ring(mac);
unsigned int i;
}
for (i = 0; i < RX_RING_SIZE; i++)
- RX_DESC(rx, i) = 0;
+ RX_BUFF(rx, i) = 0;
+}
+
+static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
+{
+ pasemi_mac_free_rx_buffers(mac);
dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);
/* Entry in use? */
WARN_ON(*buff);
- skb = dev_alloc_skb(BUF_SIZE);
+ skb = dev_alloc_skb(mac->bufsz);
skb_reserve(skb, LOCAL_SKB_ALIGN);
if (unlikely(!skb))
break;
dma = pci_map_single(mac->dma_pdev, skb->data,
- BUF_SIZE - LOCAL_SKB_ALIGN,
+ mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(dma))) {
info->skb = skb;
info->dma = dma;
- *buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
+ *buff = XCT_RXB_LEN(mac->bufsz) | XCT_RXB_ADDR(dma);
fill++;
}
len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;
- pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN,
+ pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN,
PCI_DMA_FROMDEVICE);
if (macrx & XCT_MACRX_CRC) {
return IRQ_HANDLED;
}
-static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
-{
- unsigned int flags;
-
- flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
- flags &= ~PAS_MAC_CFG_PCFG_PE;
- write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
-}
-
-static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
-{
- unsigned int flags;
-
- flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
- flags |= PAS_MAC_CFG_PCFG_PE;
- write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
-}
-
static void pasemi_adjust_link(struct net_device *dev)
{
struct pasemi_mac *mac = netdev_priv(dev);
#define MAX_RETRIES 5000
+static void pasemi_mac_pause_txchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int txch = tx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
+ PAS_DMA_TXCHAN_TCMDSTA_ST);
+
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
+ if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop tx channel, tcmdsta %08x\n", sta);
+
+ write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
+}
+
+static void pasemi_mac_pause_rxchan(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+ int rxch = rx_ring(mac)->chan.chno;
+
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
+ PAS_DMA_RXCHAN_CCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
+ if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx channel, ccmdsta 08%x\n", sta);
+ write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
+}
+
+static void pasemi_mac_pause_rxint(struct pasemi_mac *mac)
+{
+ unsigned int sta, retries;
+
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ PAS_DMA_RXINT_RCMDSTA_ST);
+ for (retries = 0; retries < MAX_RETRIES; retries++) {
+ sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
+ break;
+ cond_resched();
+ }
+
+ if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
+ dev_err(&mac->dma_pdev->dev,
+ "Failed to stop rx interface, rcmdsta %08x\n", sta);
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+}
+
static int pasemi_mac_close(struct net_device *dev)
{
struct pasemi_mac *mac = netdev_priv(dev);
unsigned int sta;
- int retries;
int rxch, txch;
rxch = rx_ring(mac)->chan.chno;
pasemi_mac_clean_tx(tx_ring(mac));
pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
- /* Disable interface */
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
- PAS_DMA_TXCHAN_TCMDSTA_ST);
- write_dma_reg( PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
- PAS_DMA_RXINT_RCMDSTA_ST);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
- PAS_DMA_RXCHAN_CCMDSTA_ST);
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(rxch));
- if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
- if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");
-
- for (retries = 0; retries < MAX_RETRIES; retries++) {
- sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
- if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
- break;
- cond_resched();
- }
-
- if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
- dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");
-
- /* Then, disable the channel. This must be done separately from
- * stopping, since you can't disable when active.
- */
-
- write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
- write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
- write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);
+ pasemi_mac_pause_txchan(mac);
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_pause_rxchan(mac);
+ pasemi_mac_intf_disable(mac);
free_irq(mac->tx->chan.irq, mac->tx);
free_irq(mac->rx->chan.irq, mac->rx);
return pkts;
}
+static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct pasemi_mac *mac = netdev_priv(dev);
+ unsigned int reg;
+ unsigned int rcmdsta;
+ int running;
+
+ if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU)
+ return -EINVAL;
+
+ running = netif_running(dev);
+
+ if (running) {
+ /* Need to stop the interface, clean out all already
+ * received buffers, free all unused buffers on the RX
+ * interface ring, then finally re-fill the rx ring with
+ * the new-size buffers and restart.
+ */
+
+ napi_disable(&mac->napi);
+ netif_tx_disable(dev);
+ pasemi_mac_intf_disable(mac);
+
+ rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
+ pasemi_mac_pause_rxint(mac);
+ pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);
+ pasemi_mac_free_rx_buffers(mac);
+ }
+
+ /* Change maxf, i.e. what size frames are accepted.
+ * Need room for ethernet header and CRC word
+ */
+ reg = read_mac_reg(mac, PAS_MAC_CFG_MACCFG);
+ reg &= ~PAS_MAC_CFG_MACCFG_MAXF_M;
+ reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
+ write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
+
+ dev->mtu = new_mtu;
+ /* MTU + ETH_HLEN + ETH_FCS_LEN + skb alignment + 2 64B cachelines */
+ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ if (running) {
+ write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
+ rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN);
+
+ rx_ring(mac)->next_to_fill = 0;
+ pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE-1);
+
+ napi_enable(&mac->napi);
+ netif_start_queue(dev);
+ pasemi_mac_intf_enable(mac);
+ }
+
+ return 0;
+}
+
static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
dev->stop = pasemi_mac_close;
dev->hard_start_xmit = pasemi_mac_start_tx;
dev->set_multicast_list = pasemi_mac_set_rx_mode;
+ dev->set_mac_address = pasemi_mac_set_mac_addr;
+ dev->mtu = PE_DEF_MTU;
+ /* 1500 MTU + ETH_HLEN + ETH_FCS_LEN + skb alignment + 2 64B cachelines */
+ mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
+
+ dev->change_mtu = pasemi_mac_change_mtu;
if (err)
goto out;
struct phy_device *phydev;
struct napi_struct napi;
+ int bufsz; /* RX ring buffer size */
u8 type;
#define MAC_TYPE_GMAC 1
#define MAC_TYPE_XAUI 2
/* MAC CFG register offsets */
enum {
PAS_MAC_CFG_PCFG = 0x80,
+ PAS_MAC_CFG_MACCFG = 0x84,
+ PAS_MAC_CFG_ADR0 = 0x8c,
+ PAS_MAC_CFG_ADR1 = 0x90,
PAS_MAC_CFG_TXP = 0x98,
PAS_MAC_IPC_CHNL = 0x208,
};
#define PAS_MAC_CFG_PCFG_SPD_100M 0x00000001
#define PAS_MAC_CFG_PCFG_SPD_1G 0x00000002
#define PAS_MAC_CFG_PCFG_SPD_10G 0x00000003
+
+#define PAS_MAC_CFG_MACCFG_TXT_M 0x70000000
+#define PAS_MAC_CFG_MACCFG_TXT_S 28
+#define PAS_MAC_CFG_MACCFG_PRES_M 0x0f000000
+#define PAS_MAC_CFG_MACCFG_PRES_S 24
+#define PAS_MAC_CFG_MACCFG_MAXF_M 0x00ffff00
+#define PAS_MAC_CFG_MACCFG_MAXF_S 8
+#define PAS_MAC_CFG_MACCFG_MAXF(x) (((x) << PAS_MAC_CFG_MACCFG_MAXF_S) & \
+ PAS_MAC_CFG_MACCFG_MAXF_M)
+#define PAS_MAC_CFG_MACCFG_MINF_M 0x000000ff
+#define PAS_MAC_CFG_MACCFG_MINF_S 0
+
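As a worked example of the MAXF encoding (illustrative): for the default 1500-byte MTU, pasemi_mac_change_mtu() programs a maximum frame size of 1500 + ETH_HLEN (14) + 4 (FCS) = 1518 bytes, so

	reg |= PAS_MAC_CFG_MACCFG_MAXF(1518);	/* (1518 << 8) & 0x00ffff00 == 0x0005ee00 */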
#define PAS_MAC_CFG_TXP_FCF 0x01000000
#define PAS_MAC_CFG_TXP_FCE 0x00800000
#define PAS_MAC_CFG_TXP_FC 0x00400000
#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
-#if MMIO_FLUSH_AUDIT_COMPLETE
+#ifdef MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */
#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg))
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
- tp = dev->priv;
+ tp = netdev_priv(dev);
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device (pdev);
return i;
}
- tp = dev->priv;
+ tp = netdev_priv(dev);
assert (ioaddr != NULL);
assert (dev != NULL);
dev->base_addr = (unsigned long) ioaddr;
/* dev->priv/tp zeroed and aligned in alloc_etherdev */
- tp = dev->priv;
+ tp = netdev_priv(dev);
/* note: tp->chipset set in netdrv_init_board */
tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
assert (dev != NULL);
- np = dev->priv;
+ np = netdev_priv(dev);
assert (np != NULL);
unregister_netdev (dev);
static int mdio_read (struct net_device *dev, int phy_id, int location)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int retval = 0;
static void mdio_write (struct net_device *dev, int phy_id, int location,
int value)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd =
(0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
static int netdrv_open (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int retval;
#ifdef NETDRV_DEBUG
void *ioaddr = tp->mmio_addr;
/* Start the hardware at open or resume. */
static void netdrv_hw_start (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 i;
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void netdrv_init_ring (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int i;
DPRINTK ("ENTER\n");
static void netdrv_timer (unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int next_tick = 60 * HZ;
int mii_lpa;
}
-static void netdrv_tx_clear (struct netdrv_private *tp)
+static void netdrv_tx_clear (struct net_device *dev)
{
int i;
+ struct netdrv_private *tp = netdev_priv(dev);
atomic_set (&tp->cur_tx, 0);
atomic_set (&tp->dirty_tx, 0);
static void netdrv_tx_timeout (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int i;
u8 tmp8;
/* Stop a shared interrupt from scavenging while we are. */
spin_lock_irqsave (&tp->lock, flags);
- netdrv_tx_clear (tp);
+ netdrv_tx_clear (dev);
spin_unlock_irqrestore (&tp->lock, flags);
static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int entry;
DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x,"
" cur %4.4x.\n", dev->name, rx_status,
rx_size, cur_rx);
-#if NETDRV_DEBUG > 2
+#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
{
int i;
DPRINTK ("%s: Frame contents ", dev->name);
static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *) dev_instance;
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
int boguscnt = max_interrupt_work;
void *ioaddr = tp->mmio_addr;
int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
static int netdrv_close (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
spin_unlock_irqrestore (&tp->lock, flags);
- synchronize_irq ();
+ synchronize_irq (dev->irq);
free_irq (dev->irq, dev);
- netdrv_tx_clear (tp);
+ netdrv_tx_clear (dev);
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
unsigned long flags;
int rc = 0;
static void netdrv_set_rx_mode (struct net_device *dev)
{
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
int i, rx_mode;
static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdrv_private *tp = dev->priv;
+ struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
static int netdrv_resume (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
- struct netdrv_private *tp = dev->priv;
+ /*struct netdrv_private *tp = netdev_priv(dev);*/
if (!netif_running(dev))
return 0;
---help---
Currently supports the IP175C PHY.
+config REALTEK_PHY
+ tristate "Drivers for Realtek PHYs"
+ ---help---
+ Supports the Realtek 821x PHY.
+
config FIXED_PHY
bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
---help---
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_FIXED_PHY) += fixed.o
obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
.driver = { .owner = THIS_MODULE },
};
+static struct phy_driver bcm5482_driver = {
+ .phy_id = 0x0143bcb0,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM5482",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm54xx_ack_interrupt,
+ .config_intr = bcm54xx_config_intr,
+ .driver = { .owner = THIS_MODULE },
+};
+
static int __init broadcom_init(void)
{
int ret;
ret = phy_driver_register(&bcm5461_driver);
if (ret)
goto out_5461;
+ ret = phy_driver_register(&bcm5482_driver);
+ if (ret)
+ goto out_5482;
return ret;
+out_5482:
+ phy_driver_unregister(&bcm5461_driver);
out_5461:
phy_driver_unregister(&bcm5421_driver);
out_5421:
static void __exit broadcom_exit(void)
{
+ phy_driver_unregister(&bcm5482_driver);
phy_driver_unregister(&bcm5461_driver);
phy_driver_unregister(&bcm5421_driver);
phy_driver_unregister(&bcm5411_driver);
int i;
int err = 0;
- spin_lock_init(&bus->mdio_lock);
+ mutex_init(&bus->mdio_lock);
if (NULL == bus || NULL == bus->name ||
NULL == bus->read ||
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
int retval;
struct mii_bus *bus = phydev->bus;
- spin_lock_bh(&bus->mdio_lock);
+ BUG_ON(in_interrupt());
+
+ mutex_lock(&bus->mdio_lock);
retval = bus->read(bus, phydev->addr, regnum);
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
return retval;
}
int err;
struct mii_bus *bus = phydev->bus;
- spin_lock_bh(&bus->mdio_lock);
+ BUG_ON(in_interrupt());
+
+ mutex_lock(&bus->mdio_lock);
err = bus->write(bus, phydev->addr, regnum, val);
- spin_unlock_bh(&bus->mdio_lock);
+ mutex_unlock(&bus->mdio_lock);
return err;
}
{
int err;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
}
out_unlock:
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
return err;
}
EXPORT_SYMBOL(phy_start_aneg);
static void phy_change(struct work_struct *work);
+static void phy_state_machine(struct work_struct *work);
static void phy_timer(unsigned long data);
/**
{
phydev->adjust_state = handler;
+ INIT_WORK(&phydev->state_queue, phy_state_machine);
init_timer(&phydev->phy_timer);
phydev->phy_timer.function = &phy_timer;
phydev->phy_timer.data = (unsigned long) phydev;
void phy_stop_machine(struct phy_device *phydev)
{
del_timer_sync(&phydev->phy_timer);
+ cancel_work_sync(&phydev->state_queue);
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (phydev->state > PHY_UP)
phydev->state = PHY_UP;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
phydev->adjust_state = NULL;
}
*/
void phy_error(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
}
/**
if (err)
goto phy_err;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
phydev->state = PHY_CHANGELINK;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
atomic_dec(&phydev->irq_disable);
enable_irq(phydev->irq);
*/
void phy_stop(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (PHY_HALTED == phydev->state)
goto out_unlock;
phydev->state = PHY_HALTED;
out_unlock:
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
/*
* Cannot call flush_scheduled_work() here as desired because
*/
void phy_start(struct phy_device *phydev)
{
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
switch (phydev->state) {
case PHY_STARTING:
default:
break;
}
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_stop);
EXPORT_SYMBOL(phy_start);
-/* PHY timer which handles the state machine */
-static void phy_timer(unsigned long data)
+/**
+ * phy_state_machine - Handle the state machine
+ * @work: work_struct that describes the work to be done
+ *
+ * Description: Scheduled by the state_queue workqueue each time
+ * phy_timer is triggered.
+ */
+static void phy_state_machine(struct work_struct *work)
{
- struct phy_device *phydev = (struct phy_device *)data;
+ struct phy_device *phydev =
+ container_of(work, struct phy_device, state_queue);
int needs_aneg = 0;
int err = 0;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
if (phydev->adjust_state)
phydev->adjust_state(phydev->attached_dev);
break;
}
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
if (needs_aneg)
err = phy_start_aneg(phydev);
mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
}
+/* PHY timer which schedules the state machine work */
+static void phy_timer(unsigned long data)
+{
+ struct phy_device *phydev = (struct phy_device *)data;
+
+ /*
+ * PHY I/O operations can potentially sleep, so we ensure that
+ * they are done from process context
+ */
+ schedule_work(&phydev->state_queue);
+}
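/*
 * Editor's sketch of the general pattern the patch applies: a timer
 * callback runs in softirq (atomic) context, so anything that may
 * sleep is pushed to a work item that runs in process context. The
 * struct and function names here are illustrative, not the driver's.
 */
struct foo {
	struct timer_list timer;	/* set up with init_timer() */
	struct work_struct work;	/* set up with INIT_WORK(..., foo_work) */
};

static void foo_work(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, work);

	/* sleeping operations (e.g. MDIO I/O) are safe here */
}

static void foo_timer(unsigned long data)
{
	struct foo *foo = (struct foo *)data;

	schedule_work(&foo->work);	/* safe in atomic context */
}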
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
dev->state = PHY_DOWN;
- spin_lock_init(&dev->lock);
+ mutex_init(&dev->lock);
return dev;
}
if (!(phydrv->flags & PHY_HAS_INTERRUPT))
phydev->irq = PHY_POLL;
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
/* Start out supporting everything. Eventually,
* a controller will attach, and may modify one
if (phydev->drv->probe)
err = phydev->drv->probe(phydev);
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
return err;
phydev = to_phy_device(dev);
- spin_lock_bh(&phydev->lock);
+ mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
- spin_unlock_bh(&phydev->lock);
+ mutex_unlock(&phydev->lock);
if (phydev->drv->remove)
phydev->drv->remove(phydev);
--- /dev/null
+/*
+ * drivers/net/phy/realtek.c
+ *
+ * Driver for Realtek PHYs
+ *
+ * Author: Johnson Leung <r58129@freescale.com>
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/phy.h>
+
+#define RTL821x_PHYSR 0x11
+#define RTL821x_PHYSR_DUPLEX 0x2000
+#define RTL821x_PHYSR_SPEED 0xc000
+#define RTL821x_INER 0x12
+#define RTL821x_INER_INIT 0x6400
+#define RTL821x_INSR 0x13
+
+MODULE_DESCRIPTION("Realtek PHY driver");
+MODULE_AUTHOR("Johnson Leung");
+MODULE_LICENSE("GPL");
+
+static int rtl821x_ack_interrupt(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_read(phydev, RTL821x_INSR);
+
+ return (err < 0) ? err : 0;
+}
+
+static int rtl821x_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ err = phy_write(phydev, RTL821x_INER,
+ RTL821x_INER_INIT);
+ else
+ err = phy_write(phydev, RTL821x_INER, 0);
+
+ return err;
+}
+
+/* RTL8211B */
+static struct phy_driver rtl821x_driver = {
+ .phy_id = 0x001cc912,
+ .name = "RTL821x Gigabit Ethernet",
+ .phy_id_mask = 0x001fffff,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &rtl821x_ack_interrupt,
+ .config_intr = &rtl821x_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init realtek_init(void)
+{
+ int ret;
+
+ ret = phy_driver_register(&rtl821x_driver);
+
+ return ret;
+}
+
+static void __exit realtek_exit(void)
+{
+ phy_driver_unregister(&rtl821x_driver);
+}
+
+module_init(realtek_init);
+module_exit(realtek_exit);
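/*
 * Editor's sketch (illustrative, not from the patch): the PHY core
 * binds this driver by reading the ID from MII_PHYSID1/MII_PHYSID2
 * and comparing it against .phy_id under .phy_id_mask, roughly:
 */
static int example_phy_matches(u32 phy_id, const struct phy_driver *drv)
{
	return (phy_id & drv->phy_id_mask) ==
	       (drv->phy_id & drv->phy_id_mask);
}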
lro->iph = ip;
lro->tcph = tcp;
lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
- lro->tcp_ack = ntohl(tcp->ack_seq);
+ lro->tcp_ack = tcp->ack_seq;
lro->sg_num = 1;
lro->total_len = ntohs(ip->tot_len);
lro->frags_len = 0;
* already been done.
*/
if (tcp->doff == 8) {
- u32 *ptr;
- ptr = (u32 *)(tcp+1);
+ __be32 *ptr;
+ ptr = (__be32 *)(tcp+1);
lro->saw_ts = 1;
- lro->cur_tsval = *(ptr+1);
+ lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr+2);
}
lro->in_use = 1;
/* Update tsecr field if this session has timestamps enabled */
if (lro->saw_ts) {
- u32 *ptr = (u32 *)(tcp + 1);
+ __be32 *ptr = (__be32 *)(tcp + 1);
*(ptr+2) = lro->cur_tsecr;
}
lro->window = tcp->window;
if (lro->saw_ts) {
- u32 *ptr;
+ __be32 *ptr;
/* Update tsecr and tsval from this packet */
- ptr = (u32 *) (tcp + 1);
- lro->cur_tsval = *(ptr + 1);
+ ptr = (__be32 *)(tcp+1);
+ lro->cur_tsval = ntohl(*(ptr+1));
lro->cur_tsecr = *(ptr + 2);
}
}
/* Ensure timestamp value increases monotonically */
if (l_lro)
- if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
+ if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
return -1;
/* timestamp echo reply should be non-zero */
- if (*((u32 *)(ptr+6)) == 0)
+ if (*((__be32 *)(ptr+6)) == 0)
return -1;
}
int in_use;
__be16 window;
u32 cur_tsval;
- u32 cur_tsecr;
+ __be32 cur_tsecr;
u8 saw_ts;
};
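/*
 * Editor's sketch of the annotation rule the LRO changes follow
 * (assumes a valid struct tcphdr *tcp whose options hold an aligned
 * timestamp block, i.e. the tcp->doff == 8 case above): fields that
 * are merely stored and echoed back stay in wire order (__be32),
 * while values used in host-order arithmetic are converted once.
 */
__be32 *ptr = (__be32 *)(tcp + 1);	/* TCP timestamp option words */
u32 tsval = ntohl(*(ptr + 1));		/* compared numerically: convert */
__be32 tsecr = *(ptr + 2);		/* echoed verbatim: keep wire order */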
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
+static struct pci_device_id sis190_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
static const u32 phy_power[] = { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
static const u32 coma_mode[] = { PCI_Y2_PHY1_COMA, PCI_Y2_PHY2_COMA };
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
/* Turn on/off phy power saving */
if (onoff)
reg1 |= coma_mode[port];
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+ sky2_pci_read32(hw, PCI_DEV_REG1);
udelay(100);
}
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
+ sky2_set_multicast(dev);
return 0;
err_out:
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
if (status & Y2_IS_PCI_EXP) {
/* PCI-Express uncorrectable Error occurred */
u32 err;
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
0xfffffffful);
dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
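/*
 * Editor's sketch of the bracketing the sky2 hunks add: writes to
 * guarded PCI config registers are enabled only between the
 * TST_CFG_WRITE_ON/OFF pair, so the patch wraps each access in one.
 * Illustrative helper, not from the driver:
 */
static void example_cfg_write16(struct sky2_hw *hw, unsigned reg, u16 val)
{
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
	sky2_pci_write16(hw, reg, val);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}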
if (status & Y2_HWE_L1_MASK)
}
sky2_power_on(hw);
+ sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
for (i = 0; i < hw->ports; i++) {
sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
err = sky2_up(dev);
if (err)
dev_close(dev);
- else
- sky2_set_multicast(dev);
}
return err;
dev_close(dev);
goto out;
}
-
- sky2_set_multicast(dev);
}
}
.get_link = bigmac_get_link,
};
-static int __init bigmac_ether_init(struct sbus_dev *qec_sdev)
+static int __devinit bigmac_ether_init(struct sbus_dev *qec_sdev)
{
struct net_device *dev;
static int version_printed;
qecp->gregs + GLOB_RSIZE);
}
-static u8 __init qec_get_burst(struct device_node *dp)
+static u8 __devinit qec_get_burst(struct device_node *dp)
{
u8 bsizes, bsizes_more;
return bsizes;
}
-static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
+static struct sunqec * __devinit get_qec(struct sbus_dev *child_sdev)
{
struct sbus_dev *qec_sdev = child_sdev->parent;
struct sunqec *qecp;
return NULL;
}
-static int __init qec_ether_init(struct sbus_dev *sdev)
+static int __devinit qec_ether_init(struct sbus_dev *sdev)
{
static unsigned version_printed;
struct net_device *dev;
.handshake_complete = vnet_handshake_complete,
};
-static void print_version(void)
+static void __devinit print_version(void)
{
static int version_printed;
}
-static int olympic_open(struct net_device *dev)
+static int __devinit olympic_open(struct net_device *dev)
{
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *olympic_mmio=olympic_priv->olympic_mmio,*init_srb;
if (!ugeth)
return;
- if (ugeth->uccf)
+ if (ugeth->uccf) {
ucc_fast_free(ugeth->uccf);
+ ugeth->uccf = NULL;
+ }
if (ugeth->p_thread_data_tx) {
qe_muram_free(ugeth->thread_dat_tx_offset);
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- /* Create CQs for hash tables */
- INIT_LIST_HEAD(&ugeth->group_hash_q);
- INIT_LIST_HEAD(&ugeth->ind_hash_q);
-
if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
(uf_info->bd_mem_part == MEM_PART_MURAM))) {
if (netif_msg_probe(ugeth))
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void ucc_netpoll(struct net_device *dev)
+{
+ struct ucc_geth_private *ugeth = netdev_priv(dev);
+ int irq = ugeth->ug_info->uf_info.irq;
+
+ disable_irq(irq);
+ ucc_geth_irq_handler(irq, dev);
+ enable_irq(irq);
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
ugeth = netdev_priv(dev);
spin_lock_init(&ugeth->lock);
+ /* Create CQs for hash tables */
+ INIT_LIST_HEAD(&ugeth->group_hash_q);
+ INIT_LIST_HEAD(&ugeth->ind_hash_q);
+
dev_set_drvdata(device, dev);
/* Set the dev->base_addr to the gfar reg region */
#ifdef CONFIG_UGETH_NAPI
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
#endif /* CONFIG_UGETH_NAPI */
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ucc_netpoll;
+#endif
dev->stop = ucc_geth_close;
// dev->change_mtu = ucc_geth_change_mtu;
dev->mtu = 1500;
struct net_device *dev = dev_get_drvdata(device);
struct ucc_geth_private *ugeth = netdev_priv(dev);
- dev_set_drvdata(device, NULL);
- ucc_geth_memclean(ugeth);
+ unregister_netdev(dev);
free_netdev(dev);
+ ucc_geth_memclean(ugeth);
+ dev_set_drvdata(device, NULL);
return 0;
}
netdev->set_multicast_list = rtl8150_set_multicast;
netdev->set_mac_address = rtl8150_set_mac_address;
netdev->get_stats = rtl8150_netdev_stats;
- netdev->mtu = RTL8150_MTU;
SET_ETHTOOL_OPS(netdev, &ops);
dev->intr_interval = 100; /* 100ms */
}
#endif
-static void rhine_hw_init(struct net_device *dev, long pioaddr)
+static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
{
struct rhine_private *rp = netdev_priv(dev);
* for 64bit hardware platforms.
*
* TODO
- * Big-endian support
* rx_copybreak/alignment
* Scatter gather
* More testing
* Init state, all RD entries belong to the NIC
*/
for (i = 0; i < vptr->options.numrx; ++i)
- vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
writew(vptr->options.numrx, &regs->RBRDU);
writel(vptr->rd_pool_dma, &regs->RDBaseLo);
vptr->int_mask = INT_MASK_DEF;
- writel(cpu_to_le32(vptr->rd_pool_dma), &regs->RDBaseLo);
+ writel(vptr->rd_pool_dma, &regs->RDBaseLo);
writew(vptr->options.numrx - 1, &regs->RDCSize);
mac_rx_queue_run(regs);
mac_rx_queue_wake(regs);
writew(vptr->options.numtx - 1, &regs->TDCSize);
for (i = 0; i < vptr->num_txq; i++) {
- writel(cpu_to_le32(vptr->td_pool_dma[i]), &(regs->TDBaseLo[i]));
+ writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
mac_tx_queue_run(regs, i);
}
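/*
 * Editor's note on the cpu_to_le32() removals above: writel() and
 * writew() already store their argument in little-endian order, so
 * pre-swapping the value would convert twice on big-endian hosts.
 * The registers therefore take plain host-order values:
 *
 *	writel(vptr->rd_pool_dma, &regs->RDBaseLo);
 */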
dirty = vptr->rd_dirty - unusable;
for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
- vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
+ vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
}
writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
struct rx_desc *rd = vptr->rd_ring + dirty;
/* Fine for an all zero Rx desc at init time as well */
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
if (!vptr->rd_info[dirty].skb) {
if (!vptr->rd_info[rd_curr].skb)
break;
- if (rd->rdesc0.owner == OWNED_BY_NIC)
+ if (rd->rdesc0.len & OWNED_BY_NIC)
break;
rmb();
/*
* Don't drop CE or RL error frame although RXOK is off
*/
- if ((rd->rdesc0.RSR & RSR_RXOK) || (!(rd->rdesc0.RSR & RSR_RXOK) && (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {
+ if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
if (velocity_receive_frame(vptr, rd_curr) < 0)
stats->rx_dropped++;
} else {
stats->rx_dropped++;
}
- rd->inten = 1;
+ rd->size |= RX_INTEN;
vptr->dev->last_rx = jiffies;
struct net_device_stats *stats = &vptr->stats;
struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
struct rx_desc *rd = &(vptr->rd_ring[idx]);
- int pkt_len = rd->rdesc0.len;
+ int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
struct sk_buff *skb;
if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
*/
*((u32 *) & (rd->rdesc0)) = 0;
- rd->len = cpu_to_le32(vptr->rx_buf_sz);
- rd->inten = 1;
+ rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
rd->pa_low = cpu_to_le32(rd_info->skb_dma);
rd->pa_high = 0;
return 0;
td = &(vptr->td_rings[qnum][idx]);
tdinfo = &(vptr->td_infos[qnum][idx]);
- if (td->tdesc0.owner == OWNED_BY_NIC)
+ if (td->tdesc0.len & OWNED_BY_NIC)
break;
if ((works++ > 15))
for (i = 0; i < tdinfo->nskb_dma; i++) {
#ifdef VELOCITY_ZERO_COPY_SUPPORT
- pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], td->tdesc1.len, PCI_DMA_TODEVICE);
+ pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
#else
pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], skb->len, PCI_DMA_TODEVICE);
#endif
struct velocity_td_info *tdinfo;
unsigned long flags;
int index;
-
int pktlen = skb->len;
+ __le16 len = cpu_to_le16(pktlen);
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
td_ptr = &(vptr->td_rings[qnum][index]);
tdinfo = &(vptr->td_infos[qnum][index]);
- td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
td_ptr->tdesc1.TCR = TCR0_TIC;
- td_ptr->td_buf[0].queue = 0;
+ td_ptr->td_buf[0].size &= ~TD_QUEUE;
/*
* Pad short frames.
if (pktlen < ETH_ZLEN) {
/* Cannot occur until ZC support */
pktlen = ETH_ZLEN;
+ len = cpu_to_le16(ETH_ZLEN);
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
memset(tdinfo->buf + skb->len, 0, ETH_ZLEN - skb->len);
tdinfo->skb = skb;
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else
#ifdef VELOCITY_ZERO_COPY_SUPPORT
if (skb_shinfo(skb)->nr_frags > 0) {
if (nfrags > 6) {
skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
tdinfo->skb_dma[0] = tdinfo->buf_dma;
- td_ptr->tdesc0.pktsize =
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len; /* queue is 0 anyway */
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
} else {
int i = 0;
tdinfo->nskb_dma = 0;
- tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data, skb->len - skb->data_len, PCI_DMA_TODEVICE);
+ tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
/* FIXME: support 48bit DMA later */
td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
td_ptr->td_buf[i].pa_high = 0;
- td_ptr->td_buf[i].bufsize = skb->len - skb->data_len;
+ td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
for (i = 0; i < nfrags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- void *addr = ((void *) page_address(frag->page + frag->page_offset));
+ void *addr = (void *)page_address(frag->page) + frag->page_offset;
tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
td_ptr->td_buf[i + 1].pa_high = 0;
- td_ptr->td_buf[i + 1].bufsize = frag->size;
+ td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
}
tdinfo->nskb_dma = i - 1;
- td_ptr->tdesc1.CMDZ = i;
}
} else
*/
tdinfo->skb = skb;
tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
- td_ptr->tdesc0.pktsize = pktlen;
+ td_ptr->tdesc0.len = len;
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
- td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
+ td_ptr->td_buf[0].size = len;
tdinfo->nskb_dma = 1;
- td_ptr->tdesc1.CMDZ = 2;
}
+ td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
- td_ptr->tdesc1.pqinf.VID = vlan_tx_tag_get(skb);
- td_ptr->tdesc1.pqinf.priority = 0;
- td_ptr->tdesc1.pqinf.CFI = 0;
+ td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
td_ptr->tdesc1.TCR |= TCR0_VETAG;
}
if (prev < 0)
prev = vptr->options.numtx - 1;
- td_ptr->tdesc0.owner = OWNED_BY_NIC;
+ td_ptr->tdesc0.len |= OWNED_BY_NIC;
vptr->td_used[qnum]++;
vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
netif_stop_queue(dev);
td_ptr = &(vptr->td_rings[qnum][prev]);
- td_ptr->td_buf[0].queue = 1;
+ td_ptr->td_buf[0].size |= TD_QUEUE;
mac_tx_queue_wake(vptr->mac_regs, qnum);
}
dev->trans_start = jiffies;
velocity_save_context(vptr, &vptr->context);
velocity_shutdown(vptr);
velocity_set_wol(vptr);
- pci_enable_wake(pdev, 3, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
pci_set_power_state(pdev, PCI_D3hot);
} else {
velocity_save_context(vptr, &vptr->context);
* Bits in the RSR0 register
*/
-#define RSR_DETAG 0x0080
-#define RSR_SNTAG 0x0040
-#define RSR_RXER 0x0020
-#define RSR_RL 0x0010
-#define RSR_CE 0x0008
-#define RSR_FAE 0x0004
-#define RSR_CRC 0x0002
-#define RSR_VIDM 0x0001
+#define RSR_DETAG cpu_to_le16(0x0080)
+#define RSR_SNTAG cpu_to_le16(0x0040)
+#define RSR_RXER cpu_to_le16(0x0020)
+#define RSR_RL cpu_to_le16(0x0010)
+#define RSR_CE cpu_to_le16(0x0008)
+#define RSR_FAE cpu_to_le16(0x0004)
+#define RSR_CRC cpu_to_le16(0x0002)
+#define RSR_VIDM cpu_to_le16(0x0001)
/*
* Bits in the RSR1 register
*/
-#define RSR_RXOK 0x8000 // rx OK
-#define RSR_PFT 0x4000 // Perfect filtering address match
-#define RSR_MAR 0x2000 // MAC accept multicast address packet
-#define RSR_BAR 0x1000 // MAC accept broadcast address packet
-#define RSR_PHY 0x0800 // MAC accept physical address packet
-#define RSR_VTAG 0x0400 // 802.1p/1q tagging packet indicator
-#define RSR_STP 0x0200 // start of packet
-#define RSR_EDP 0x0100 // end of packet
-
-/*
- * Bits in the RSR1 register
- */
-
-#define RSR1_RXOK 0x80 // rx OK
-#define RSR1_PFT 0x40 // Perfect filtering address match
-#define RSR1_MAR 0x20 // MAC accept multicast address packet
-#define RSR1_BAR 0x10 // MAC accept broadcast address packet
-#define RSR1_PHY 0x08 // MAC accept physical address packet
-#define RSR1_VTAG 0x04 // 802.1p/1q tagging packet indicator
-#define RSR1_STP 0x02 // start of packet
-#define RSR1_EDP 0x01 // end of packet
+#define RSR_RXOK cpu_to_le16(0x8000) // rx OK
+#define RSR_PFT cpu_to_le16(0x4000) // Perfect filtering address match
+#define RSR_MAR cpu_to_le16(0x2000) // MAC accept multicast address packet
+#define RSR_BAR cpu_to_le16(0x1000) // MAC accept broadcast address packet
+#define RSR_PHY cpu_to_le16(0x0800) // MAC accept physical address packet
+#define RSR_VTAG cpu_to_le16(0x0400) // 802.1p/1q tagging packet indicator
+#define RSR_STP cpu_to_le16(0x0200) // start of packet
+#define RSR_EDP cpu_to_le16(0x0100) // end of packet
/*
* Bits in the CSM register
* Bits in the TSR0 register
*/
-#define TSR0_ABT 0x0080 // Tx abort because of excessive collision
-#define TSR0_OWT 0x0040 // Jumbo frame Tx abort
-#define TSR0_OWC 0x0020 // Out of window collision
-#define TSR0_COLS 0x0010 // experience collision in this transmit event
-#define TSR0_NCR3 0x0008 // collision retry counter[3]
-#define TSR0_NCR2 0x0004 // collision retry counter[2]
-#define TSR0_NCR1 0x0002 // collision retry counter[1]
-#define TSR0_NCR0 0x0001 // collision retry counter[0]
-#define TSR0_TERR 0x8000 //
-#define TSR0_FDX 0x4000 // current transaction is serviced by full duplex mode
-#define TSR0_GMII 0x2000 // current transaction is serviced by GMII mode
-#define TSR0_LNKFL 0x1000 // packet serviced during link down
-#define TSR0_SHDN 0x0400 // shutdown case
-#define TSR0_CRS 0x0200 // carrier sense lost
-#define TSR0_CDH 0x0100 // AQE test fail (CD heartbeat)
-
-/*
- * Bits in the TSR1 register
- */
-
-#define TSR1_TERR 0x80 //
-#define TSR1_FDX 0x40 // current transaction is serviced by full duplex mode
-#define TSR1_GMII 0x20 // current transaction is serviced by GMII mode
-#define TSR1_LNKFL 0x10 // packet serviced during link down
-#define TSR1_SHDN 0x04 // shutdown case
-#define TSR1_CRS 0x02 // carrier sense lost
-#define TSR1_CDH 0x01 // AQE test fail (CD heartbeat)
+#define TSR0_ABT cpu_to_le16(0x0080) // Tx abort because of excessive collision
+#define TSR0_OWT cpu_to_le16(0x0040) // Jumbo frame Tx abort
+#define TSR0_OWC cpu_to_le16(0x0020) // Out of window collision
+#define TSR0_COLS cpu_to_le16(0x0010) // experience collision in this transmit event
+#define TSR0_NCR3 cpu_to_le16(0x0008) // collision retry counter[3]
+#define TSR0_NCR2 cpu_to_le16(0x0004) // collision retry counter[2]
+#define TSR0_NCR1 cpu_to_le16(0x0002) // collision retry counter[1]
+#define TSR0_NCR0 cpu_to_le16(0x0001) // collision retry counter[0]
+#define TSR0_TERR cpu_to_le16(0x8000) //
+#define TSR0_FDX cpu_to_le16(0x4000) // current transaction is serviced by full duplex mode
+#define TSR0_GMII cpu_to_le16(0x2000) // current transaction is serviced by GMII mode
+#define TSR0_LNKFL cpu_to_le16(0x1000) // packet serviced during link down
+#define TSR0_SHDN cpu_to_le16(0x0400) // shutdown case
+#define TSR0_CRS cpu_to_le16(0x0200) // carrier sense lost
+#define TSR0_CDH cpu_to_le16(0x0100) // AQE test fail (CD heartbeat)
//
// Bits in the TCR0 register
*/
struct rdesc0 {
- u16 RSR; /* Receive status */
- u16 len:14; /* Received packet length */
- u16 reserved:1;
- u16 owner:1; /* Who owns this buffer ? */
+ __le16 RSR; /* Receive status */
+ __le16 len; /* bits 0--13; bit 15 - owner */
};
struct rdesc1 {
- u16 PQTAG;
+ __le16 PQTAG;
u8 CSM;
u8 IPKT;
};
+enum {
+ RX_INTEN = __constant_cpu_to_le16(0x8000)
+};
+
struct rx_desc {
struct rdesc0 rdesc0;
struct rdesc1 rdesc1;
- u32 pa_low; /* Low 32 bit PCI address */
- u16 pa_high; /* Next 16 bit PCI address (48 total) */
- u16 len:15; /* Frame size */
- u16 inten:1; /* Enable interrupt */
+ __le32 pa_low; /* Low 32 bit PCI address */
+ __le16 pa_high; /* Next 16 bit PCI address (48 total) */
+ __le16 size; /* bits 0--14 - frame size, bit 15 - enable int. */
} __attribute__ ((__packed__));
/*
*/
struct tdesc0 {
- u16 TSR; /* Transmit status register */
- u16 pktsize:14; /* Size of frame */
- u16 reserved:1;
- u16 owner:1; /* Who owns the buffer */
+ __le16 TSR; /* Transmit status register */
+ __le16 len; /* bits 0--13 - size of frame, bit 15 - owner */
};
-struct pqinf { /* Priority queue info */
- u16 VID:12;
- u16 CFI:1;
- u16 priority:3;
-} __attribute__ ((__packed__));
-
struct tdesc1 {
- struct pqinf pqinf;
+ __le16 vlan;
u8 TCR;
- u8 TCPLS:2;
- u8 reserved:2;
- u8 CMDZ:4;
+ u8 cmd; /* bits 0--1 - TCPLS, bits 4--7 - CMDZ */
} __attribute__ ((__packed__));
+enum {
+ TD_QUEUE = __constant_cpu_to_le16(0x8000)
+};
+
struct td_buf {
- u32 pa_low;
- u16 pa_high;
- u16 bufsize:14;
- u16 reserved:1;
- u16 queue:1;
+ __le32 pa_low;
+ __le16 pa_high;
+ __le16 size; /* bits 0--13 - size, bit 15 - queue */
} __attribute__ ((__packed__));
struct tx_desc {
enum velocity_owner {
OWNED_BY_HOST = 0,
- OWNED_BY_NIC = 1
+ OWNED_BY_NIC = __constant_cpu_to_le16(0x8000)
};
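/*
 * Editor's sketch: with the bitfields replaced by fixed little-endian
 * words, descriptor state is manipulated through masks, so the wire
 * layout no longer depends on compiler bitfield ordering. Assuming
 * rx/tx descriptor pointers rd and td as used elsewhere in the driver:
 */
rd->rdesc0.len |= OWNED_BY_NIC;			/* bit 15: hand buffer to NIC */
pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;	/* bits 0--13: frame length */
td->td_buf[0].size |= TD_QUEUE;			/* bit 15: queue flag */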
volatile u8 RCR;
volatile u8 TCR;
- volatile u32 CR0Set; /* 0x08 */
- volatile u32 CR0Clr; /* 0x0C */
+ volatile __le32 CR0Set; /* 0x08 */
+ volatile __le32 CR0Clr; /* 0x0C */
volatile u8 MARCAM[8]; /* 0x10 */
- volatile u32 DecBaseHi; /* 0x18 */
- volatile u16 DbfBaseHi; /* 0x1C */
- volatile u16 reserved_1E;
+ volatile __le32 DecBaseHi; /* 0x18 */
+ volatile __le16 DbfBaseHi; /* 0x1C */
+ volatile __le16 reserved_1E;
- volatile u16 ISRCTL; /* 0x20 */
+ volatile __le16 ISRCTL; /* 0x20 */
volatile u8 TXESR;
volatile u8 RXESR;
- volatile u32 ISR; /* 0x24 */
- volatile u32 IMR;
+ volatile __le32 ISR; /* 0x24 */
+ volatile __le32 IMR;
- volatile u32 TDStatusPort; /* 0x2C */
+ volatile __le32 TDStatusPort; /* 0x2C */
- volatile u16 TDCSRSet; /* 0x30 */
+ volatile __le16 TDCSRSet; /* 0x30 */
volatile u8 RDCSRSet;
volatile u8 reserved_33;
- volatile u16 TDCSRClr;
+ volatile __le16 TDCSRClr;
volatile u8 RDCSRClr;
volatile u8 reserved_37;
- volatile u32 RDBaseLo; /* 0x38 */
- volatile u16 RDIdx; /* 0x3C */
- volatile u16 reserved_3E;
+ volatile __le32 RDBaseLo; /* 0x38 */
+ volatile __le16 RDIdx; /* 0x3C */
+ volatile __le16 reserved_3E;
- volatile u32 TDBaseLo[4]; /* 0x40 */
+ volatile __le32 TDBaseLo[4]; /* 0x40 */
- volatile u16 RDCSize; /* 0x50 */
- volatile u16 TDCSize; /* 0x52 */
- volatile u16 TDIdx[4]; /* 0x54 */
- volatile u16 tx_pause_timer; /* 0x5C */
- volatile u16 RBRDU; /* 0x5E */
+ volatile __le16 RDCSize; /* 0x50 */
+ volatile __le16 TDCSize; /* 0x52 */
+ volatile __le16 TDIdx[4]; /* 0x54 */
+ volatile __le16 tx_pause_timer; /* 0x5C */
+ volatile __le16 RBRDU; /* 0x5E */
- volatile u32 FIFOTest0; /* 0x60 */
- volatile u32 FIFOTest1; /* 0x64 */
+ volatile __le32 FIFOTest0; /* 0x60 */
+ volatile __le32 FIFOTest1; /* 0x64 */
volatile u8 CAMADDR; /* 0x68 */
volatile u8 CAMCR; /* 0x69 */
volatile u8 PHYSR1;
volatile u8 MIICR;
volatile u8 MIIADR;
- volatile u16 MIIDATA;
+ volatile __le16 MIIDATA;
- volatile u16 SoftTimer0; /* 0x74 */
- volatile u16 SoftTimer1;
+ volatile __le16 SoftTimer0; /* 0x74 */
+ volatile __le16 SoftTimer1;
volatile u8 CFGA; /* 0x78 */
volatile u8 CFGB;
volatile u8 CFGC;
volatile u8 CFGD;
- volatile u16 DCFG; /* 0x7C */
- volatile u16 MCFG;
+ volatile __le16 DCFG; /* 0x7C */
+ volatile __le16 MCFG;
volatile u8 TBIST; /* 0x80 */
volatile u8 RBIST;
volatile u8 rev_id;
volatile u8 PORSTS;
- volatile u32 MIBData; /* 0x88 */
+ volatile __le32 MIBData; /* 0x88 */
- volatile u16 EEWrData;
+ volatile __le16 EEWrData;
volatile u8 reserved_8E;
volatile u8 BPMDWr;
volatile u8 EECHKSUM; /* 0x92 */
volatile u8 EECSR;
- volatile u16 EERdData; /* 0x94 */
+ volatile __le16 EERdData; /* 0x94 */
volatile u8 EADDR;
volatile u8 EMBCMD;
volatile u8 DEBUG;
volatile u8 CHIPGCR;
- volatile u16 WOLCRSet; /* 0xA0 */
+ volatile __le16 WOLCRSet; /* 0xA0 */
volatile u8 PWCFGSet;
volatile u8 WOLCFGSet;
- volatile u16 WOLCRClr; /* 0xA4 */
+ volatile __le16 WOLCRClr; /* 0xA4 */
volatile u8 PWCFGCLR;
volatile u8 WOLCFGClr;
- volatile u16 WOLSRSet; /* 0xA8 */
- volatile u16 reserved_AA;
+ volatile __le16 WOLSRSet; /* 0xA8 */
+ volatile __le16 reserved_AA;
- volatile u16 WOLSRClr; /* 0xAC */
- volatile u16 reserved_AE;
+ volatile __le16 WOLSRClr; /* 0xAC */
+ volatile __le16 reserved_AE;
- volatile u16 PatternCRC[8]; /* 0xB0 */
- volatile u32 ByteMask[4][4]; /* 0xC0 */
+ volatile __le16 PatternCRC[8]; /* 0xB0 */
+ volatile __le32 ByteMask[4][4]; /* 0xC0 */
} __attribute__ ((__packed__));
struct arp_packet {
u8 dest_mac[ETH_ALEN];
u8 src_mac[ETH_ALEN];
- u16 type;
- u16 ar_hrd;
- u16 ar_pro;
+ __be16 type;
+ __be16 ar_hrd;
+ __be16 ar_pro;
u8 ar_hln;
u8 ar_pln;
- u16 ar_op;
+ __be16 ar_op;
u8 ar_sha[ETH_ALEN];
u8 ar_sip[4];
u8 ar_tha[ETH_ALEN];
struct _magic_packet {
u8 dest_mac[6];
u8 src_mac[6];
- u16 type;
+ __be16 type;
u8 MAC[16][6];
u8 password[6];
} __attribute__ ((__packed__));
#define ath5k_pci_resume NULL
#endif /* CONFIG_PM */
-static struct pci_driver ath5k_pci_drv_id = {
+static struct pci_driver ath5k_pci_driver = {
.name = "ath5k_pci",
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
ath5k_debug_init();
- ret = pci_register_driver(&ath5k_pci_drv_id);
+ ret = pci_register_driver(&ath5k_pci_driver);
if (ret) {
printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
return ret;
static void __exit
exit_ath5k_pci(void)
{
- pci_unregister_driver(&ath5k_pci_drv_id);
+ pci_unregister_driver(&ath5k_pci_driver);
ath5k_debug_finish();
}
priv->last_statistics_time = jiffies;
}
-void iwl3945_add_radiotap(struct iwl3945_priv *priv, struct sk_buff *skb,
- struct iwl3945_rx_frame_hdr *rx_hdr,
- struct ieee80211_rx_status *stats)
+static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
+ struct sk_buff *skb,
+ struct iwl3945_rx_frame_hdr *rx_hdr,
+ struct ieee80211_rx_status *stats)
{
/* First cache any information we need before we overwrite
* the information provided in the skb from the hardware */
struct ieee80211_ht_info *sta_ht_inf)
{
__le32 sta_flags;
+ u8 mimo_ps_mode;
if (!sta_ht_inf || !sta_ht_inf->ht_supported)
goto done;
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
+
sta_flags = priv->stations[index].sta.station_flags;
- if (((sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS >> 2))
- == IWL_MIMO_PS_DYNAMIC)
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_MIMO_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- else
- sta_flags &= ~STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_MIMO_PS_DISABLED:
+ break;
+ default:
+ IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
+ break;
+ }
sta_flags |= cpu_to_le32(
(u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
if (iwl4965_is_fat_tx_allowed(priv, sta_ht_inf))
sta_flags |= STA_FLG_FAT_EN_MSK;
else
- sta_flags &= (~STA_FLG_FAT_EN_MSK);
+ sta_flags &= ~STA_FLG_FAT_EN_MSK;
priv->stations[index].sta.station_flags = sta_flags;
done:
#define QOS_CONTROL_LEN 2
-#define IEEE80211_STYPE_BACK_REQ 0x0080
-#define IEEE80211_STYPE_BACK 0x0090
-
static inline int ieee80211_is_management(u16 fc)
{
return -ENODEV;
}
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERROR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl3945_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
}
}
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERROR("ucode not available for device bringup\n");
- return -EIO;
- }
-
iwl3945_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl3945_hw_nic_init(priv);
return -ENODEV;
}
+ if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
+ IWL_ERROR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl4965_read32(priv, CSR_GP_CNTRL) &
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
}
}
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERROR("ucode not available for device bringup\n");
- return -EIO;
- }
-
iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF);
rc = iwl4965_hw_nic_init(priv);
# Build the PCI Hotplug drivers if we were asked to
obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
+ifdef CONFIG_HOTPLUG_PCI
+obj-y += hotplug-pci.o
+endif
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
--- /dev/null
+/* Core PCI functionality used only by PCI hotplug */
+
+#include <linux/pci.h>
+#include "pci.h"
+
+
+unsigned int pci_do_scan_bus(struct pci_bus *bus)
+{
+ unsigned int max;
+
+ max = pci_scan_child_bus(bus);
+
+ /*
+ * Make the discovered devices available.
+ */
+ pci_bus_add_devices(bus);
+
+ return max;
+}
+EXPORT_SYMBOL(pci_do_scan_bus);
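/*
 * Editor's sketch of a caller (hypothetical hotplug handler, not part
 * of the patch): after a slot is powered up, the hotplug driver
 * rescans the bus below the bridge and registers what it finds.
 */
static void example_slot_enabled(struct pci_bus *bus)
{
	unsigned int max = pci_do_scan_bus(bus);

	pr_debug("rescan found buses up to %02x\n", max);
}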
#include <linux/topology.h>
#include <linux/mm.h>
#include <linux/capability.h>
-#include <linux/aspm.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
if (pcibios_add_platform_entries(pdev))
goto err_rom_file;
- pcie_aspm_create_sysfs_dev_files(pdev);
-
return 0;
err_rom_file:
if (!sysfs_initialized)
return;
- pcie_aspm_remove_sysfs_dev_files(pdev);
-
if (pdev->cfg_size < 4096)
sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
else
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
-#include <linux/aspm.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
if (need_restore)
pci_restore_bars(dev);
- if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
-
return 0;
}
When in doubt, say N.
source "drivers/pci/pcie/aer/Kconfig"
-
-#
-# PCI Express ASPM