From: Samuel Thibault
Subject: [SCM] Debian GNU Hurd packaging branch, master, updated. debian/20120408-1-2-g2daea0e
Date: Tue, 17 Apr 2012 22:33:19 +0000

The following commit has been merged in the master branch:
commit 754f987c8928015a31b9f13eb7d20ef7a1275609
Author: Samuel Thibault <address@hidden>
Date:   Mon Apr 16 00:33:01 2012 +0000

    Upgrade libdde_linux26 layer to 2.6.32 compatibility
    
      * debian/patches/libdde_{addr_fix,addr_list,config,devres,dma_head,ethoc,
      ethtool,fixes,group_addr,mdio,netdev_tx_t,pci-needs_freset,pci_ids.h,phy,
      pr_cont,rcu,rculist,rx_queue,trans_start,ucast_list,workqueue}: New patches
      to upgrade the libdde_linux26 layer to 2.6.32 compatibility.

diff --git a/debian/changelog b/debian/changelog
index 17dbbcf..1e01325 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,12 @@
+hurd (20120408-2) UNRELEASED; urgency=low
+
+  * debian/patches/libdde_{addr_fix,addr_list,config,devres,dma_head,ethoc,
+  ethtool,fixes,group_addr,mdio,netdev_tx_t,pci-needs_freset,pci_ids.h,phy,
+  pr_cont,rcu,rculist,rx_queue,trans_start,ucast_list,workqueue}: New patches
+  to upgrade the libdde_linux26 layer to 2.6.32 compatibility.
+
+ -- Samuel Thibault <address@hidden>  Mon, 16 Apr 2012 00:31:41 +0000
+
 hurd (20120408-1) unstable; urgency=low
 
   * New upstream snapshot.
diff --git a/debian/patches/libdde_addr_fix.patch b/debian/patches/libdde_addr_fix.patch
new file mode 100644
index 0000000..7d6af79
--- /dev/null
+++ b/debian/patches/libdde_addr_fix.patch
@@ -0,0 +1,37 @@
+commit 0c27922e4933ceb86644f4a9b1af212ffe5aad75
+Author: Eric Dumazet <address@hidden>
+Date:   Mon Jun 8 03:49:24 2009 +0000
+
+    net: dev_addr_init() fix
+    
+    commit f001fde5eadd915f4858d22ed70d7040f48767cf
+    (net: introduce a list of device addresses dev_addr_list (v6))
+    added one regression Vegard Nossum found in its testings.
+    
+    With kmemcheck help, Vegard found some uninitialized memory
+    was read and reported to user, potentialy leaking kernel data.
+    ( thread can be found on http://lkml.org/lkml/2009/5/30/177 )
+    
+    dev_addr_init() incorrectly uses sizeof() operator. We were
+    initializing one byte instead of MAX_ADDR_LEN bytes.
+    
+    Reported-by: Vegard Nossum <address@hidden>
+    Signed-off-by: Eric Dumazet <address@hidden>
+    Acked-by: Jiri Pirko <address@hidden>
+    Signed-off-by: David S. Miller <address@hidden>
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4913089..81b392e 100644
+--- a/libdde_linux26/lib/src/net/core/dev.c
++++ b/libdde_linux26/lib/src/net/core/dev.c
+@@ -3577,8 +3577,8 @@ static int dev_addr_init(struct net_device *dev)
+       /* rtnl_mutex must be held here */
+ 
+       INIT_LIST_HEAD(&dev->dev_addr_list);
+-      memset(addr, 0, sizeof(*addr));
+-      err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(*addr),
++      memset(addr, 0, sizeof(addr));
++      err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
+                           NETDEV_HW_ADDR_T_LAN);
+       if (!err) {
+               /*
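The fix above is the classic C sizeof pitfall: in dev_addr_init(), addr is
declared as unsigned char addr[MAX_ADDR_LEN], so sizeof(addr) yields
MAX_ADDR_LEN while sizeof(*addr) yields 1, meaning only a single byte was
being zeroed and registered. A minimal standalone sketch of the difference
(a hypothetical buffer, not the kernel code):

    #include <stdio.h>

    #define MAX_ADDR_LEN 32

    int main(void)
    {
            unsigned char addr[MAX_ADDR_LEN];
            unsigned char *p = addr;

            /* sizeof on the array names the whole buffer... */
            printf("sizeof(addr)  = %zu\n", sizeof(addr));  /* 32 */
            /* ...dereferencing names one element... */
            printf("sizeof(*addr) = %zu\n", sizeof(*addr)); /* 1 */
            /* ...and on a pointer, just the pointer itself. */
            printf("sizeof(p)     = %zu\n", sizeof(p));     /* 4 or 8 */
            return 0;
    }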
diff --git a/debian/patches/libdde_addr_list.patch b/debian/patches/libdde_addr_list.patch
new file mode 100644
index 0000000..c1b4d96
--- /dev/null
+++ b/debian/patches/libdde_addr_list.patch
@@ -0,0 +1,423 @@
+commit f001fde5eadd915f4858d22ed70d7040f48767cf
+Author: Jiri Pirko <address@hidden>
+Date:   Tue May 5 02:48:28 2009 +0000
+
+    net: introduce a list of device addresses dev_addr_list (v6)
+    
+    v5 -> v6 (current):
+    -removed so far unused static functions
+    -corrected dev_addr_del_multiple to call del instead of add
+    
+    v4 -> v5:
+    -added device address type (suggested by davem)
+    -removed refcounting (better to have simplier code then safe potentially few
+     bytes)
+    
+    v3 -> v4:
+    -changed kzalloc to kmalloc in __hw_addr_add_ii()
+    -ASSERT_RTNL() avoided in dev_addr_flush() and dev_addr_init()
+    
+    v2 -> v3:
+    -removed unnecessary rcu read locking
+    -moved dev_addr_flush() calling to ensure no null dereference of dev_addr
+    
+    v1 -> v2:
+    -added forgotten ASSERT_RTNL to dev_addr_init and dev_addr_flush
+    -removed unnecessary rcu_read locking in dev_addr_init
+    -use compare_ether_addr_64bits instead of compare_ether_addr
+    -use L1_CACHE_BYTES as size for allocating struct netdev_hw_addr
+    -use call_rcu instead of rcu_synchronize
+    -moved is_etherdev_addr into __KERNEL__ ifdef
+    
+    This patch introduces a new list in struct net_device and brings a set of
+    functions to handle the work with device address list. The list is a replacement
+    for the original dev_addr field and because in some situations there is need to
+    carry several device addresses with the net device. To be backward compatible,
+    dev_addr is made to point to the first member of the list so original drivers
+    sees no difference.
+    
+    Signed-off-by: Jiri Pirko <address@hidden>
+    Signed-off-by: David S. Miller <address@hidden>
+
+Index: hurd-debian/libdde_linux26/contrib/include/linux/etherdevice.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/etherdevice.h        2012-04-16 00:26:43.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/etherdevice.h     2012-04-16 00:34:43.000000000 +0000
+@@ -182,6 +182,33 @@
+       return compare_ether_addr(addr1, addr2);
+ #endif
+ }
++
++/**
++ * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
++ * @dev: Pointer to a device structure
++ * @addr: Pointer to a six-byte array containing the Ethernet address
++ *
++ * Compare passed address with all addresses of the device. Return true if the
++ * address if one of the device addresses.
++ *
++ * Note that this function calls compare_ether_addr_64bits() so take care of
++ * the right padding.
++ */
++static inline bool is_etherdev_addr(const struct net_device *dev,
++                                  const u8 addr[6 + 2])
++{
++      struct netdev_hw_addr *ha;
++      int res = 1;
++
++      rcu_read_lock();
++      for_each_dev_addr(dev, ha) {
++              res = compare_ether_addr_64bits(addr, ha->addr);
++              if (!res)
++                      break;
++      }
++      rcu_read_unlock();
++      return !res;
++}
+ #endif        /* __KERNEL__ */
+ 
+ #endif        /* _LINUX_ETHERDEVICE_H */
+Index: hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/netdevice.h  2012-04-16 00:33:34.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h       2012-04-16 00:34:43.000000000 +0000
+@@ -211,6 +211,16 @@
+ #define dmi_users     da_users
+ #define dmi_gusers    da_gusers
+ 
++struct netdev_hw_addr {
++      struct list_head        list;
++      unsigned char           addr[MAX_ADDR_LEN];
++      unsigned char           type;
++#define NETDEV_HW_ADDR_T_LAN  1
++#define NETDEV_HW_ADDR_T_SAN  2
++#define NETDEV_HW_ADDR_T_SLAVE        3
++      struct rcu_head         rcu_head;
++};
++
+ struct hh_cache
+ {
+       struct hh_cache *hh_next;       /* Next entry                        */
+@@ -757,8 +767,11 @@
+  */
+       unsigned long           last_rx;        /* Time of last Rx      */
+       /* Interface address info used in eth_type_trans() */
+-      unsigned char           dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
+-                                                         because most packets are unicast) */
++      unsigned char           *dev_addr;      /* hw address, (before bcast
++                                                 because most packets are
++                                                 unicast) */
++
++      struct list_head        dev_addr_list; /* list of device hw addresses */
+ 
+       unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
+ 
+@@ -1768,6 +1781,13 @@
+       spin_unlock_bh(&dev->addr_list_lock);
+ }
+ 
++/*
++ * dev_addr_list walker. Should be used only for read access. Call with
++ * rcu_read_lock held.
++ */
++#define for_each_dev_addr(dev, ha) \
++              list_for_each_entry_rcu(ha, &dev->dev_addr_list, list)
++
+ /* These functions live elsewhere (drivers/net/net_init.c, but related) */
+ 
+ extern void           ether_setup(struct net_device *dev);
+@@ -1780,6 +1800,19 @@
+       alloc_netdev_mq(sizeof_priv, name, setup, 1)
+ extern int            register_netdev(struct net_device *dev);
+ extern void           unregister_netdev(struct net_device *dev);
++
++/* Functions used for device addresses handling */
++extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
++                      unsigned char addr_type);
++extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
++                      unsigned char addr_type);
++extern int dev_addr_add_multiple(struct net_device *to_dev,
++                               struct net_device *from_dev,
++                               unsigned char addr_type);
++extern int dev_addr_del_multiple(struct net_device *to_dev,
++                               struct net_device *from_dev,
++                               unsigned char addr_type);
++
+ /* Functions used for secondary unicast and multicast support */
+ extern void           dev_set_rx_mode(struct net_device *dev);
+ extern void           __dev_set_rx_mode(struct net_device *dev);
+Index: hurd-debian/libdde_linux26/lib/src/net/core/dev.c
+===================================================================
+--- hurd-debian.orig/libdde_linux26/lib/src/net/core/dev.c     2012-04-16 00:26:43.000000000 +0000
++++ hurd-debian/libdde_linux26/lib/src/net/core/dev.c  2012-04-16 00:34:43.000000000 +0000
+@@ -3397,6 +3397,252 @@
+       netif_addr_unlock_bh(dev);
+ }
+ 
++/* hw addresses list handling functions */
++
++static int __hw_addr_add(struct list_head *list, unsigned char *addr,
++                       int addr_len, unsigned char addr_type)
++{
++      struct netdev_hw_addr *ha;
++      int alloc_size;
++
++      if (addr_len > MAX_ADDR_LEN)
++              return -EINVAL;
++
++      alloc_size = sizeof(*ha);
++      if (alloc_size < L1_CACHE_BYTES)
++              alloc_size = L1_CACHE_BYTES;
++      ha = kmalloc(alloc_size, GFP_ATOMIC);
++      if (!ha)
++              return -ENOMEM;
++      memcpy(ha->addr, addr, addr_len);
++      ha->type = addr_type;
++      list_add_tail_rcu(&ha->list, list);
++      return 0;
++}
++
++static void ha_rcu_free(struct rcu_head *head)
++{
++      struct netdev_hw_addr *ha;
++
++      ha = container_of(head, struct netdev_hw_addr, rcu_head);
++      kfree(ha);
++}
++
++static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr,
++                          int addr_len, unsigned char addr_type,
++                          int ignore_index)
++{
++      struct netdev_hw_addr *ha;
++      int i = 0;
++
++      list_for_each_entry(ha, list, list) {
++              if (i++ != ignore_index &&
++                  !memcmp(ha->addr, addr, addr_len) &&
++                  (ha->type == addr_type || !addr_type)) {
++                      list_del_rcu(&ha->list);
++                      call_rcu(&ha->rcu_head, ha_rcu_free);
++                      return 0;
++              }
++      }
++      return -ENOENT;
++}
++
++static int __hw_addr_add_multiple_ii(struct list_head *to_list,
++                                   struct list_head *from_list,
++                                   int addr_len, unsigned char addr_type,
++                                   int ignore_index)
++{
++      int err;
++      struct netdev_hw_addr *ha, *ha2;
++      unsigned char type;
++
++      list_for_each_entry(ha, from_list, list) {
++              type = addr_type ? addr_type : ha->type;
++              err = __hw_addr_add(to_list, ha->addr, addr_len, type);
++              if (err)
++                      goto unroll;
++      }
++      return 0;
++
++unroll:
++      list_for_each_entry(ha2, from_list, list) {
++              if (ha2 == ha)
++                      break;
++              type = addr_type ? addr_type : ha2->type;
++              __hw_addr_del_ii(to_list, ha2->addr, addr_len, type,
++                               ignore_index);
++      }
++      return err;
++}
++
++static void __hw_addr_del_multiple_ii(struct list_head *to_list,
++                                    struct list_head *from_list,
++                                    int addr_len, unsigned char addr_type,
++                                    int ignore_index)
++{
++      struct netdev_hw_addr *ha;
++      unsigned char type;
++
++      list_for_each_entry(ha, from_list, list) {
++              type = addr_type ? addr_type : ha->type;
++              __hw_addr_del_ii(to_list, ha->addr, addr_len, addr_type,
++                               ignore_index);
++      }
++}
++
++static void __hw_addr_flush(struct list_head *list)
++{
++      struct netdev_hw_addr *ha, *tmp;
++
++      list_for_each_entry_safe(ha, tmp, list, list) {
++              list_del_rcu(&ha->list);
++              call_rcu(&ha->rcu_head, ha_rcu_free);
++      }
++}
++
++/* Device addresses handling functions */
++
++static void dev_addr_flush(struct net_device *dev)
++{
++      /* rtnl_mutex must be held here */
++
++      __hw_addr_flush(&dev->dev_addr_list);
++      dev->dev_addr = NULL;
++}
++
++static int dev_addr_init(struct net_device *dev)
++{
++      unsigned char addr[MAX_ADDR_LEN];
++      struct netdev_hw_addr *ha;
++      int err;
++
++      /* rtnl_mutex must be held here */
++
++      INIT_LIST_HEAD(&dev->dev_addr_list);
++      memset(addr, 0, sizeof(*addr));
++      err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(*addr),
++                          NETDEV_HW_ADDR_T_LAN);
++      if (!err) {
++              /*
++               * Get the first (previously created) address from the list
++               * and set dev_addr pointer to this location.
++               */
++              ha = list_first_entry(&dev->dev_addr_list,
++                                    struct netdev_hw_addr, list);
++              dev->dev_addr = ha->addr;
++      }
++      return err;
++}
++
++/**
++ *    dev_addr_add    - Add a device address
++ *    @dev: device
++ *    @addr: address to add
++ *    @addr_type: address type
++ *
++ *    Add a device address to the device or increase the reference count if
++ *    it already exists.
++ *
++ *    The caller must hold the rtnl_mutex.
++ */
++int dev_addr_add(struct net_device *dev, unsigned char *addr,
++               unsigned char addr_type)
++{
++      int err;
++
++      ASSERT_RTNL();
++
++      err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len,
++                          addr_type);
++      if (!err)
++              call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++      return err;
++}
++EXPORT_SYMBOL(dev_addr_add);
++
++/**
++ *    dev_addr_del    - Release a device address.
++ *    @dev: device
++ *    @addr: address to delete
++ *    @addr_type: address type
++ *
++ *    Release reference to a device address and remove it from the device
++ *    if the reference count drops to zero.
++ *
++ *    The caller must hold the rtnl_mutex.
++ */
++int dev_addr_del(struct net_device *dev, unsigned char *addr,
++               unsigned char addr_type)
++{
++      int err;
++
++      ASSERT_RTNL();
++
++      err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len,
++                             addr_type, 0);
++      if (!err)
++              call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++      return err;
++}
++EXPORT_SYMBOL(dev_addr_del);
++
++/**
++ *    dev_addr_add_multiple   - Add device addresses from another device
++ *    @to_dev: device to which addresses will be added
++ *    @from_dev: device from which addresses will be added
++ *    @addr_type: address type - 0 means type will be used from from_dev
++ *
++ *    Add device addresses of the one device to another.
++ **
++ *    The caller must hold the rtnl_mutex.
++ */
++int dev_addr_add_multiple(struct net_device *to_dev,
++                        struct net_device *from_dev,
++                        unsigned char addr_type)
++{
++      int err;
++
++      ASSERT_RTNL();
++
++      if (from_dev->addr_len != to_dev->addr_len)
++              return -EINVAL;
++      err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list,
++                                      &from_dev->dev_addr_list,
++                                      to_dev->addr_len, addr_type, 0);
++      if (!err)
++              call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
++      return err;
++}
++EXPORT_SYMBOL(dev_addr_add_multiple);
++
++/**
++ *    dev_addr_del_multiple   - Delete device addresses by another device
++ *    @to_dev: device where the addresses will be deleted
++ *    @from_dev: device by which addresses the addresses will be deleted
++ *    @addr_type: address type - 0 means type will used from from_dev
++ *
++ *    Deletes addresses in to device by the list of addresses in from device.
++ *
++ *    The caller must hold the rtnl_mutex.
++ */
++int dev_addr_del_multiple(struct net_device *to_dev,
++                        struct net_device *from_dev,
++                        unsigned char addr_type)
++{
++      ASSERT_RTNL();
++
++      if (from_dev->addr_len != to_dev->addr_len)
++              return -EINVAL;
++      __hw_addr_del_multiple_ii(&to_dev->dev_addr_list,
++                                &from_dev->dev_addr_list,
++                                to_dev->addr_len, addr_type, 0);
++      call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
++      return 0;
++}
++EXPORT_SYMBOL(dev_addr_del_multiple);
++
++/* unicast and multicast addresses handling functions */
++
+ int __dev_addr_delete(struct dev_addr_list **list, int *count,
+                     void *addr, int alen, int glbl)
+ {
+@@ -4737,6 +4983,7 @@
+ 
+       dev->gso_max_size = GSO_MAX_SIZE;
+ 
++      dev_addr_init(dev);
+       netdev_init_queues(dev);
+ 
+       INIT_LIST_HEAD(&dev->napi_list);
+@@ -4762,6 +5009,9 @@
+ 
+       kfree(dev->_tx);
+ 
++      /* Flush device addresses */
++      dev_addr_flush(dev);
++
+       list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
+               netif_napi_del(p);
+ 
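The backward-compatibility trick in dev_addr_init() above deserves a note:
dev->dev_addr stops being a flat array and instead aliases the addr bytes of
the first netdev_hw_addr on the list, so legacy drivers that read dev_addr
directly keep working unmodified. A simplified userspace analogue (singly
linked list, hypothetical names, no RCU):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAX_ADDR_LEN 32

    struct hw_addr {
            struct hw_addr *next;
            unsigned char addr[MAX_ADDR_LEN];
    };

    struct device {
            struct hw_addr *addr_list;  /* list of hw addresses */
            unsigned char *dev_addr;    /* legacy alias into first entry */
    };

    static int device_addr_init(struct device *dev)
    {
            struct hw_addr *ha = calloc(1, sizeof(*ha));

            if (!ha)
                    return -1;
            dev->addr_list = ha;
            /* Old code keeps dereferencing dev->dev_addr; the pointer
             * now lands inside the first list entry instead. */
            dev->dev_addr = ha->addr;
            return 0;
    }

    int main(void)
    {
            struct device dev;

            if (device_addr_init(&dev))
                    return 1;
            memcpy(dev.dev_addr, "\x00\x11\x22\x33\x44\x55", 6);
            printf("%02x:...:%02x\n", dev.dev_addr[0], dev.dev_addr[5]);
            free(dev.addr_list);
            return 0;
    }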
diff --git a/debian/patches/libdde_config.patch b/debian/patches/libdde_config.patch
new file mode 100644
index 0000000..9adfc4b
--- /dev/null
+++ b/debian/patches/libdde_config.patch
@@ -0,0 +1,10 @@
+diff --git a/libdde_linux26/include/linux/autoconf.h b/libdde_linux26/include/linux/autoconf.h
+index cf45b2b..908b8da 100644
+--- a/libdde_linux26/include/linux/autoconf.h
++++ b/libdde_linux26/include/linux/autoconf.h
+@@ -94,3 +94,5 @@
+ #undef CONFIG_BLK_DEV_IO_TRACE
+ 
+ #undef CONFIG_FW_LOADER
++#undef CONFIG_DMI
++#undef CONFIG_PCIEAER
diff --git a/debian/patches/libdde_devres.patch b/debian/patches/libdde_devres.patch
new file mode 100644
index 0000000..6c41cea
--- /dev/null
+++ b/debian/patches/libdde_devres.patch
@@ -0,0 +1,366 @@
+--- /dev/null  2011-08-03 18:03:30.000000000 +0000
++++ b/libdde_linux26/contrib/lib/devres.c      2012-04-15 23:14:01.000000000 +0000
+@@ -0,0 +1,351 @@
++#include <linux/pci.h>
++#include <linux/io.h>
++#include <linux/module.h>
++
++void devm_ioremap_release(struct device *dev, void *res)
++{
++      iounmap(*(void __iomem **)res);
++}
++
++static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
++{
++      return *(void **)res == match_data;
++}
++
++/**
++ * devm_ioremap - Managed ioremap()
++ * @dev: Generic device to remap IO address for
++ * @offset: BUS offset to map
++ * @size: Size of map
++ *
++ * Managed ioremap().  Map is automatically unmapped on driver detach.
++ */
++void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
++                         unsigned long size)
++{
++      void __iomem **ptr, *addr;
++
++      ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
++      if (!ptr)
++              return NULL;
++
++      addr = ioremap(offset, size);
++      if (addr) {
++              *ptr = addr;
++              devres_add(dev, ptr);
++      } else
++              devres_free(ptr);
++
++      return addr;
++}
++EXPORT_SYMBOL(devm_ioremap);
++
++/**
++ * devm_ioremap_nocache - Managed ioremap_nocache()
++ * @dev: Generic device to remap IO address for
++ * @offset: BUS offset to map
++ * @size: Size of map
++ *
++ * Managed ioremap_nocache().  Map is automatically unmapped on driver
++ * detach.
++ */
++void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
++                                 unsigned long size)
++{
++      void __iomem **ptr, *addr;
++
++      ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
++      if (!ptr)
++              return NULL;
++
++      addr = ioremap_nocache(offset, size);
++      if (addr) {
++              *ptr = addr;
++              devres_add(dev, ptr);
++      } else
++              devres_free(ptr);
++
++      return addr;
++}
++EXPORT_SYMBOL(devm_ioremap_nocache);
++
++/**
++ * devm_iounmap - Managed iounmap()
++ * @dev: Generic device to unmap for
++ * @addr: Address to unmap
++ *
++ * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
++ */
++void devm_iounmap(struct device *dev, void __iomem *addr)
++{
++      iounmap(addr);
++      WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
++                             (void *)addr));
++}
++EXPORT_SYMBOL(devm_iounmap);
++
++#ifdef CONFIG_HAS_IOPORT
++/*
++ * Generic iomap devres
++ */
++static void devm_ioport_map_release(struct device *dev, void *res)
++{
++      ioport_unmap(*(void __iomem **)res);
++}
++
++static int devm_ioport_map_match(struct device *dev, void *res,
++                               void *match_data)
++{
++      return *(void **)res == match_data;
++}
++
++/**
++ * devm_ioport_map - Managed ioport_map()
++ * @dev: Generic device to map ioport for
++ * @port: Port to map
++ * @nr: Number of ports to map
++ *
++ * Managed ioport_map().  Map is automatically unmapped on driver
++ * detach.
++ */
++void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
++                             unsigned int nr)
++{
++      void __iomem **ptr, *addr;
++
++      ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
++      if (!ptr)
++              return NULL;
++
++      addr = ioport_map(port, nr);
++      if (addr) {
++              *ptr = addr;
++              devres_add(dev, ptr);
++      } else
++              devres_free(ptr);
++
++      return addr;
++}
++EXPORT_SYMBOL(devm_ioport_map);
++
++/**
++ * devm_ioport_unmap - Managed ioport_unmap()
++ * @dev: Generic device to unmap for
++ * @addr: Address to unmap
++ *
++ * Managed ioport_unmap().  @addr must have been mapped using
++ * devm_ioport_map().
++ */
++void devm_ioport_unmap(struct device *dev, void __iomem *addr)
++{
++      ioport_unmap(addr);
++      WARN_ON(devres_destroy(dev, devm_ioport_map_release,
++                             devm_ioport_map_match, (void *)addr));
++}
++EXPORT_SYMBOL(devm_ioport_unmap);
++
++#ifdef CONFIG_PCI
++/*
++ * PCI iomap devres
++ */
++#define PCIM_IOMAP_MAX        PCI_ROM_RESOURCE
++
++struct pcim_iomap_devres {
++      void __iomem *table[PCIM_IOMAP_MAX];
++};
++
++static void pcim_iomap_release(struct device *gendev, void *res)
++{
++      struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
++      struct pcim_iomap_devres *this = res;
++      int i;
++
++      for (i = 0; i < PCIM_IOMAP_MAX; i++)
++              if (this->table[i])
++                      pci_iounmap(dev, this->table[i]);
++}
++
++/**
++ * pcim_iomap_table - access iomap allocation table
++ * @pdev: PCI device to access iomap table for
++ *
++ * Access iomap allocation table for @dev.  If iomap table doesn't
++ * exist and @pdev is managed, it will be allocated.  All iomaps
++ * recorded in the iomap table are automatically unmapped on driver
++ * detach.
++ *
++ * This function might sleep when the table is first allocated but can
++ * be safely called without context and guaranteed to succed once
++ * allocated.
++ */
++void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
++{
++      struct pcim_iomap_devres *dr, *new_dr;
++
++      dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
++      if (dr)
++              return dr->table;
++
++      new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
++      if (!new_dr)
++              return NULL;
++      dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
++      return dr->table;
++}
++EXPORT_SYMBOL(pcim_iomap_table);
++
++/**
++ * pcim_iomap - Managed pcim_iomap()
++ * @pdev: PCI device to iomap for
++ * @bar: BAR to iomap
++ * @maxlen: Maximum length of iomap
++ *
++ * Managed pci_iomap().  Map is automatically unmapped on driver
++ * detach.
++ */
++void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
++{
++      void __iomem **tbl;
++
++      BUG_ON(bar >= PCIM_IOMAP_MAX);
++
++      tbl = (void __iomem **)pcim_iomap_table(pdev);
++      if (!tbl || tbl[bar])   /* duplicate mappings not allowed */
++              return NULL;
++
++      tbl[bar] = pci_iomap(pdev, bar, maxlen);
++      return tbl[bar];
++}
++EXPORT_SYMBOL(pcim_iomap);
++
++/**
++ * pcim_iounmap - Managed pci_iounmap()
++ * @pdev: PCI device to iounmap for
++ * @addr: Address to unmap
++ *
++ * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
++ */
++void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
++{
++      void __iomem **tbl;
++      int i;
++
++      pci_iounmap(pdev, addr);
++
++      tbl = (void __iomem **)pcim_iomap_table(pdev);
++      BUG_ON(!tbl);
++
++      for (i = 0; i < PCIM_IOMAP_MAX; i++)
++              if (tbl[i] == addr) {
++                      tbl[i] = NULL;
++                      return;
++              }
++      WARN_ON(1);
++}
++EXPORT_SYMBOL(pcim_iounmap);
++
++/**
++ * pcim_iomap_regions - Request and iomap PCI BARs
++ * @pdev: PCI device to map IO resources for
++ * @mask: Mask of BARs to request and iomap
++ * @name: Name used when requesting regions
++ *
++ * Request and iomap regions specified by @mask.
++ */
++int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
++{
++      void __iomem * const *iomap;
++      int i, rc;
++
++      iomap = pcim_iomap_table(pdev);
++      if (!iomap)
++              return -ENOMEM;
++
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++              unsigned long len;
++
++              if (!(mask & (1 << i)))
++                      continue;
++
++              rc = -EINVAL;
++              len = pci_resource_len(pdev, i);
++              if (!len)
++                      goto err_inval;
++
++              rc = pci_request_region(pdev, i, name);
++              if (rc)
++                      goto err_inval;
++
++              rc = -ENOMEM;
++              if (!pcim_iomap(pdev, i, 0))
++                      goto err_region;
++      }
++
++      return 0;
++
++ err_region:
++      pci_release_region(pdev, i);
++ err_inval:
++      while (--i >= 0) {
++              if (!(mask & (1 << i)))
++                      continue;
++              pcim_iounmap(pdev, iomap[i]);
++              pci_release_region(pdev, i);
++      }
++
++      return rc;
++}
++EXPORT_SYMBOL(pcim_iomap_regions);
++
++/**
++ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
++ * @pdev: PCI device to map IO resources for
++ * @mask: Mask of BARs to iomap
++ * @name: Name used when requesting regions
++ *
++ * Request all PCI BARs and iomap regions specified by @mask.
++ */
++int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask,
++                                 const char *name)
++{
++      int request_mask = ((1 << 6) - 1) & ~mask;
++      int rc;
++
++      rc = pci_request_selected_regions(pdev, request_mask, name);
++      if (rc)
++              return rc;
++
++      rc = pcim_iomap_regions(pdev, mask, name);
++      if (rc)
++              pci_release_selected_regions(pdev, request_mask);
++      return rc;
++}
++EXPORT_SYMBOL(pcim_iomap_regions_request_all);
++
++/**
++ * pcim_iounmap_regions - Unmap and release PCI BARs
++ * @pdev: PCI device to map IO resources for
++ * @mask: Mask of BARs to unmap and release
++ *
++ * Unamp and release regions specified by @mask.
++ */
++void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
++{
++      void __iomem * const *iomap;
++      int i;
++
++      iomap = pcim_iomap_table(pdev);
++      if (!iomap)
++              return;
++
++      for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
++              if (!(mask & (1 << i)))
++                      continue;
++
++              pcim_iounmap(pdev, iomap[i]);
++              pci_release_region(pdev, i);
++      }
++}
++EXPORT_SYMBOL(pcim_iounmap_regions);
++#endif
++#endif
+diff --git a/libdde_linux26/lib/src/Makefile b/libdde_linux26/lib/src/Makefile
+index 358196b..f7a64da 100644
+--- a/libdde_linux26/lib/src/Makefile
++++ b/libdde_linux26/lib/src/Makefile
+@@ -103,6 +103,7 @@ SRC_C_libdde_linux26.o.a += \
+                               lib/crc32.c \
+                               lib/ctype.c \
+                               lib/cpumask.c \
++                              lib/devres.c \
+                               lib/find_next_bit.c \
+                               lib/hexdump.c \
+                               lib/idr.c \
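For context, devres.c is imported because 2.6.32-era network drivers lean on
the managed (devm_*/pcim_*) API: anything mapped through it is torn down
automatically when the driver detaches. A hedged sketch of typical use in a
PCI probe routine (hypothetical driver, error paths trimmed):

    #include <linux/pci.h>
    #include <linux/io.h>

    static int example_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
    {
            void __iomem *regs;
            int rc;

            rc = pci_enable_device(pdev);
            if (rc)
                    return rc;

            /* Request BAR 0 and iomap it; both are unwound by devres
             * on driver detach, so no explicit iounmap/release. */
            rc = pcim_iomap_regions(pdev, 1 << 0, "example");
            if (rc)
                    return rc;
            regs = pcim_iomap_table(pdev)[0];

            /* ... program the device through 'regs' ... */
            return 0;
    }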
diff --git a/debian/patches/libdde_dma_head.patch b/debian/patches/libdde_dma_head.patch
new file mode 100644
index 0000000..1a1c75f
--- /dev/null
+++ b/debian/patches/libdde_dma_head.patch
@@ -0,0 +1,93 @@
+commit 042a53a9e437feaf2230dd2cadcecfae9c7bfe05
+Author: Eric Dumazet <address@hidden>
+Date:   Fri Jun 5 04:04:16 2009 +0000
+
+    net: skb_shared_info optimization
+    
+    skb_dma_unmap() is quite expensive for small packets,
+    because we use two different cache lines from skb_shared_info.
+    
+    One to access nr_frags, one to access dma_maps[0]
+    
+    Instead of dma_maps being an array of MAX_SKB_FRAGS + 1 elements,
+    let dma_head alone in a new dma_head field, close to nr_frags,
+    to reduce cache lines misses.
+    
+    Tested on my dev machine (bnx2 & tg3 adapters), nice speedup !
+    
+    Signed-off-by: Eric Dumazet <address@hidden>
+    Signed-off-by: David S. Miller <address@hidden>
+
+Index: hurd-debian/libdde_linux26/contrib/include/linux/skbuff.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/skbuff.h     2012-04-16 00:26:40.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/skbuff.h  2012-04-16 00:34:56.000000000 +0000
+@@ -142,6 +142,9 @@
+       atomic_t        dataref;
+       unsigned short  nr_frags;
+       unsigned short  gso_size;
++#ifdef CONFIG_HAS_DMA
++      dma_addr_t      dma_head;
++#endif
+       /* Warning: this field is not always filled in (UFO)! */
+       unsigned short  gso_segs;
+       unsigned short  gso_type;
+@@ -152,7 +155,7 @@
+       struct sk_buff  *frag_list;
+       skb_frag_t      frags[MAX_SKB_FRAGS];
+ #ifdef CONFIG_HAS_DMA
+-      dma_addr_t      dma_maps[MAX_SKB_FRAGS + 1];
++      dma_addr_t      dma_maps[MAX_SKB_FRAGS];
+ #endif
+ };
+ 
+Index: hurd-debian/libdde_linux26/contrib/net/core/skb_dma_map.c
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/net/core/skb_dma_map.c     2012-04-16 00:26:40.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/net/core/skb_dma_map.c  2012-04-16 00:34:56.000000000 +0000
+@@ -20,7 +20,7 @@
+       if (dma_mapping_error(dev, map))
+               goto out_err;
+ 
+-      sp->dma_maps[0] = map;
++      sp->dma_head = map;
+       for (i = 0; i < sp->nr_frags; i++) {
+               skb_frag_t *fp = &sp->frags[i];
+ 
+@@ -28,7 +28,7 @@
+                                  fp->size, dir);
+               if (dma_mapping_error(dev, map))
+                       goto unwind;
+-              sp->dma_maps[i + 1] = map;
++              sp->dma_maps[i] = map;
+       }
+       sp->num_dma_maps = i + 1;
+ 
+@@ -38,10 +38,10 @@
+       while (--i >= 0) {
+               skb_frag_t *fp = &sp->frags[i];
+ 
+-              dma_unmap_page(dev, sp->dma_maps[i + 1],
++              dma_unmap_page(dev, sp->dma_maps[i],
+                              fp->size, dir);
+       }
+-      dma_unmap_single(dev, sp->dma_maps[0],
++      dma_unmap_single(dev, sp->dma_head,
+                        skb_headlen(skb), dir);
+ out_err:
+       return -ENOMEM;
+@@ -54,12 +54,12 @@
+       struct skb_shared_info *sp = skb_shinfo(skb);
+       int i;
+ 
+-      dma_unmap_single(dev, sp->dma_maps[0],
++      dma_unmap_single(dev, sp->dma_head,
+                        skb_headlen(skb), dir);
+       for (i = 0; i < sp->nr_frags; i++) {
+               skb_frag_t *fp = &sp->frags[i];
+ 
+-              dma_unmap_page(dev, sp->dma_maps[i + 1],
++              dma_unmap_page(dev, sp->dma_maps[i],
+                              fp->size, dir);
+       }
+ }
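The speedup described in the commit message is purely a layout effect:
nr_frags sits at the front of skb_shared_info while dma_maps[0] used to live
beyond the frags[] array, typically on a different cache line. A small
standalone sketch of how to inspect such layouts with offsetof (simplified
stand-in structs, not the real skb_shared_info):

    #include <stdio.h>
    #include <stddef.h>

    #define MAX_SKB_FRAGS 18
    typedef unsigned long long dma_addr_t;

    struct shinfo_old {
            unsigned short nr_frags;
            unsigned long  frags[MAX_SKB_FRAGS];  /* stand-in for skb_frag_t */
            dma_addr_t     dma_maps[MAX_SKB_FRAGS + 1];
    };

    struct shinfo_new {
            unsigned short nr_frags;
            dma_addr_t     dma_head;              /* moved next to nr_frags */
            unsigned long  frags[MAX_SKB_FRAGS];
            dma_addr_t     dma_maps[MAX_SKB_FRAGS];
    };

    int main(void)
    {
            /* With 64-byte cache lines, offsets landing in different
             * 64-byte blocks cost an extra miss when both are touched. */
            printf("old: nr_frags@%zu dma_maps[0]@%zu\n",
                   offsetof(struct shinfo_old, nr_frags),
                   offsetof(struct shinfo_old, dma_maps));
            printf("new: nr_frags@%zu dma_head@%zu\n",
                   offsetof(struct shinfo_new, nr_frags),
                   offsetof(struct shinfo_new, dma_head));
            return 0;
    }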
diff --git a/debian/patches/libdde_ethoc.patch b/debian/patches/libdde_ethoc.patch
new file mode 100644
index 0000000..eba5cdf
--- /dev/null
+++ b/debian/patches/libdde_ethoc.patch
@@ -0,0 +1,25 @@
+--- /dev/null  2011-08-03 18:03:30.000000000 +0000
++++ b/libdde_linux26/contrib/include/net/ethoc.h       2012-04-15 22:19:57.000000000 +0000
+@@ -0,0 +1,22 @@
++/*
++ * linux/include/net/ethoc.h
++ *
++ * Copyright (C) 2008-2009 Avionic Design GmbH
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * Written by Thierry Reding <address@hidden>
++ */
++
++#ifndef LINUX_NET_ETHOC_H
++#define LINUX_NET_ETHOC_H 1
++
++struct ethoc_platform_data {
++      u8 hwaddr[IFHWADDRLEN];
++      s8 phy_id;
++};
++
++#endif /* !LINUX_NET_ETHOC_H */
++
diff --git a/debian/patches/libdde_ethtool.patch b/debian/patches/libdde_ethtool.patch
new file mode 100644
index 0000000..9e52d1b
--- /dev/null
+++ b/debian/patches/libdde_ethtool.patch
@@ -0,0 +1,98 @@
+diff --git a/libdde_linux26/contrib/include/linux/ethtool.h b/libdde_linux26/contrib/include/linux/ethtool.h
+index 27c67a5..45f34dc 100644
+--- a/libdde_linux26/contrib/include/linux/ethtool.h
++++ b/libdde_linux26/contrib/include/linux/ethtool.h
+@@ -25,11 +25,14 @@ struct ethtool_cmd {
+       __u8    phy_address;
+       __u8    transceiver;    /* Which transceiver to use */
+       __u8    autoneg;        /* Enable or disable autonegotiation */
++      __u8    mdio_support;
+       __u32   maxtxpkt;       /* Tx pkts before generating tx int */
+       __u32   maxrxpkt;       /* Rx pkts before generating rx int */
+       __u16   speed_hi;
+-      __u16   reserved2;
+-      __u32   reserved[3];
++      __u8    eth_tp_mdix;
++      __u8    reserved2;
++      __u32   lp_advertising; /* Features the link partner advertises */
++      __u32   reserved[2];
+ };
+ 
+ static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
+@@ -469,6 +472,13 @@ struct ethtool_ops {
+#define       ETHTOOL_SRXFH           0x0000002a /* Set RX flow hash configuration */
+ #define ETHTOOL_GGRO          0x0000002b /* Get GRO enable (ethtool_value) */
+ #define ETHTOOL_SGRO          0x0000002c /* Set GRO enable (ethtool_value) */
++#define       ETHTOOL_GRXRINGS        0x0000002d /* Get RX rings available for LB */
++#define       ETHTOOL_GRXCLSRLCNT     0x0000002e /* Get RX class rule count */
++#define       ETHTOOL_GRXCLSRULE      0x0000002f /* Get RX classification rule */
++#define       ETHTOOL_GRXCLSRLALL     0x00000030 /* Get all RX classification rule */
++#define       ETHTOOL_SRXCLSRLDEL     0x00000031 /* Delete RX classification rule */
++#define       ETHTOOL_SRXCLSRLINS     0x00000032 /* Insert RX classification rule */
++#define       ETHTOOL_FLASHDEV        0x00000033 /* Flash firmware to device */
+ 
+ /* compatibility with older code */
+ #define SPARC_ETH_GSET                ETHTOOL_GSET
+@@ -491,6 +501,11 @@ struct ethtool_ops {
+ #define SUPPORTED_Pause                       (1 << 13)
+ #define SUPPORTED_Asym_Pause          (1 << 14)
+ #define SUPPORTED_2500baseX_Full      (1 << 15)
++#define SUPPORTED_Backplane           (1 << 16)
++#define SUPPORTED_1000baseKX_Full     (1 << 17)
++#define SUPPORTED_10000baseKX4_Full   (1 << 18)
++#define SUPPORTED_10000baseKR_Full    (1 << 19)
++#define SUPPORTED_10000baseR_FEC      (1 << 20)
+ 
+ /* Indicates what features are advertised by the interface. */
+ #define ADVERTISED_10baseT_Half               (1 << 0)
+@@ -509,6 +524,11 @@ struct ethtool_ops {
+ #define ADVERTISED_Pause              (1 << 13)
+ #define ADVERTISED_Asym_Pause         (1 << 14)
+ #define ADVERTISED_2500baseX_Full     (1 << 15)
++#define ADVERTISED_Backplane          (1 << 16)
++#define ADVERTISED_1000baseKX_Full    (1 << 17)
++#define ADVERTISED_10000baseKX4_Full  (1 << 18)
++#define ADVERTISED_10000baseKR_Full   (1 << 19)
++#define ADVERTISED_10000baseR_FEC     (1 << 20)
+ 
+ /* The following are all involved in forcing a particular link
+  * mode for the device for setting things.  When getting the
+@@ -533,6 +553,7 @@ struct ethtool_ops {
+ #define PORT_MII              0x02
+ #define PORT_FIBRE            0x03
+ #define PORT_BNC              0x04
++#define PORT_OTHER            0xff
+ 
+ /* Which transceiver to use. */
+ #define XCVR_INTERNAL         0x00
+@@ -547,6 +568,11 @@ struct ethtool_ops {
+ #define AUTONEG_DISABLE               0x00
+ #define AUTONEG_ENABLE                0x01
+ 
++/* Mode MDI or MDI-X */
++#define ETH_TP_MDI_INVALID    0x00
++#define ETH_TP_MDI            0x01
++#define ETH_TP_MDI_X          0x02
++
+ /* Wake-On-Lan options. */
+ #define WAKE_PHY              (1 << 0)
+ #define WAKE_UCAST            (1 << 1)
+@@ -565,6 +591,11 @@ struct ethtool_ops {
+ #define       UDP_V6_FLOW     0x06
+ #define       SCTP_V6_FLOW    0x07
+ #define       AH_ESP_V6_FLOW  0x08
++#define       AH_V4_FLOW      0x09
++#define       ESP_V4_FLOW     0x0a
++#define       AH_V6_FLOW      0x0b
++#define       ESP_V6_FLOW     0x0c
++#define       IP_USER_FLOW    0x0d
+ 
+ /* L3-L4 network traffic flow hash options */
+ #define       RXH_DEV_PORT    (1 << 0)
+@@ -577,5 +608,6 @@ struct ethtool_ops {
+ #define       RXH_L4_B_2_3    (1 << 7) /* dst port in case of TCP/UDP/SCTP */
+ #define       RXH_DISCARD     (1 << 31)
+ 
++#define       RX_CLS_FLOW_DISC        0xffffffffffffffffULL
+ 
+ #endif /* _LINUX_ETHTOOL_H */
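One detail worth flagging in this header: link speed is split across the
legacy 16-bit speed field and the speed_hi field, and the
ethtool_cmd_speed_set() helper visible in the hunk context fills both. A
sketch of the combine/split logic; the exact helper bodies here are an
assumption to verify against the real 2.6.32 ethtool.h:

    /* Store a 32-bit speed into the split fields... */
    static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
                                             __u32 speed)
    {
            ep->speed = (__u16)speed;            /* low 16 bits */
            ep->speed_hi = (__u16)(speed >> 16); /* high 16 bits */
    }

    /* ...and reassemble it when reading. */
    static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep)
    {
            return (ep->speed_hi << 16) | ep->speed;
    }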
diff --git a/debian/patches/libdde_fixes.patch b/debian/patches/libdde_fixes.patch
new file mode 100644
index 0000000..54d2cf1
--- /dev/null
+++ b/debian/patches/libdde_fixes.patch
@@ -0,0 +1,13 @@
+diff --git a/libdde_linux26/contrib/mm/dmapool.c b/libdde_linux26/contrib/mm/dmapool.c
+index b1f0885..5c7aca4 100644
+--- a/libdde_linux26/contrib/mm/dmapool.c
++++ b/libdde_linux26/contrib/mm/dmapool.c
+@@ -37,6 +37,8 @@
+ #include <linux/types.h>
+ #include <linux/wait.h>
+ 
++#include <ddekit/timer.h>
++
+ #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+ #define DMAPOOL_DEBUG 1
+ #endif
diff --git a/debian/patches/libdde_group_addr.patch b/debian/patches/libdde_group_addr.patch
new file mode 100644
index 0000000..2ea7271
--- /dev/null
+++ b/debian/patches/libdde_group_addr.patch
@@ -0,0 +1,429 @@
+commit 31278e71471399beaff9280737e52b47db4dc345
+Author: Jiri Pirko <address@hidden>
+Date:   Wed Jun 17 01:12:19 2009 +0000
+
+    net: group address list and its count
+    
+    This patch is inspired by patch recently posted by Johannes Berg. Basically what
+    my patch does is to group list and a count of addresses into newly introduced
+    structure netdev_hw_addr_list. This brings us two benefits:
+    1) struct net_device becames a bit nicer.
+    2) in the future there will be a possibility to operate with lists independently
+       on netdevices (with exporting right functions).
+    I wanted to introduce this patch before I'll post a multicast lists conversion.
+    
+    Signed-off-by: Jiri Pirko <address@hidden>
+    
+     drivers/net/bnx2.c              |    4 +-
+     drivers/net/e1000/e1000_main.c  |    4 +-
+     drivers/net/ixgbe/ixgbe_main.c  |    6 +-
+     drivers/net/mv643xx_eth.c       |    2 +-
+     drivers/net/niu.c               |    4 +-
+     drivers/net/virtio_net.c        |   10 ++--
+     drivers/s390/net/qeth_l2_main.c |    2 +-
+     include/linux/netdevice.h       |   17 +++--
+     net/core/dev.c                  |  130 ++++++++++++++++++--------------------
+     9 files changed, 89 insertions(+), 90 deletions(-)
+    Signed-off-by: David S. Miller <address@hidden>
+
+Index: hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/netdevice.h  2012-04-16 00:34:46.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h       2012-04-16 00:34:51.000000000 +0000
+@@ -224,6 +224,11 @@
+       struct rcu_head         rcu_head;
+ };
+ 
++struct netdev_hw_addr_list {
++      struct list_head        list;
++      int                     count;
++};
++
+ struct hh_cache
+ {
+       struct hh_cache *hh_next;       /* Next entry                        */
+       unsigned char           addr_len;       /* hardware address length      */
*/
+       unsigned short          dev_id;         /* for shared network cards */
+ 
+-      struct list_head        uc_list;        /* Secondary unicast mac
+-                                                 addresses */
+-      int                     uc_count;       /* Number of installed ucasts   */
++      struct netdev_hw_addr_list      uc;     /* Secondary unicast
++                                                 mac addresses */
+       int                     uc_promisc;
+       spinlock_t              addr_list_lock;
+       struct dev_addr_list    *mc_list;       /* Multicast mac addresses      */
+@@ -775,7 +779,8 @@
+                                                  because most packets are
+                                                  unicast) */
+ 
+-      struct list_head        dev_addr_list; /* list of device hw addresses */
++      struct netdev_hw_addr_list      dev_addrs; /* list of device
++                                                    hw addresses */
+ 
+       unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
+ 
+@@ -1786,11 +1791,11 @@
+ }
+ 
+ /*
+- * dev_addr_list walker. Should be used only for read access. Call with
++ * dev_addrs walker. Should be used only for read access. Call with
+  * rcu_read_lock held.
+  */
+ #define for_each_dev_addr(dev, ha) \
+-              list_for_each_entry_rcu(ha, &dev->dev_addr_list, list)
++              list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
+ 
+ /* These functions live elsewhere (drivers/net/net_init.c, but related) */
+ 
+Index: hurd-debian/libdde_linux26/lib/src/net/core/dev.c
+===================================================================
+--- hurd-debian.orig/libdde_linux26/lib/src/net/core/dev.c     2012-04-16 00:34:49.000000000 +0000
++++ hurd-debian/libdde_linux26/lib/src/net/core/dev.c  2012-04-16 00:34:51.000000000 +0000
+@@ -3377,10 +3377,10 @@
+               /* Unicast addresses changes may only happen under the rtnl,
+                * therefore calling __dev_set_promiscuity here is safe.
+                */
+-              if (dev->uc_count > 0 && !dev->uc_promisc) {
++              if (dev->uc.count > 0 && !dev->uc_promisc) {
+                       __dev_set_promiscuity(dev, 1);
+                       dev->uc_promisc = 1;
+-              } else if (dev->uc_count == 0 && dev->uc_promisc) {
++              } else if (dev->uc.count == 0 && dev->uc_promisc) {
+                       __dev_set_promiscuity(dev, -1);
+                       dev->uc_promisc = 0;
+               }
+@@ -3399,9 +3399,8 @@
+ 
+ /* hw addresses list handling functions */
+ 
+-static int __hw_addr_add(struct list_head *list, int *delta,
+-                       unsigned char *addr, int addr_len,
+-                       unsigned char addr_type)
++static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
++                       int addr_len, unsigned char addr_type)
+ {
+       struct netdev_hw_addr *ha;
+       int alloc_size;
+@@ -3409,7 +3408,7 @@
+       if (addr_len > MAX_ADDR_LEN)
+               return -EINVAL;
+ 
+-      list_for_each_entry(ha, list, list) {
++      list_for_each_entry(ha, &list->list, list) {
+               if (!memcmp(ha->addr, addr, addr_len) &&
+                   ha->type == addr_type) {
+                       ha->refcount++;
+@@ -3428,9 +3427,8 @@
+       ha->type = addr_type;
+       ha->refcount = 1;
+       ha->synced = false;
+-      list_add_tail_rcu(&ha->list, list);
+-      if (delta)
+-              (*delta)++;
++      list_add_tail_rcu(&ha->list, &list->list);
++      list->count++;
+       return 0;
+ }
+ 
+@@ -3442,120 +3440,121 @@
+       kfree(ha);
+ }
+ 
+-static int __hw_addr_del(struct list_head *list, int *delta,
+-                       unsigned char *addr, int addr_len,
+-                       unsigned char addr_type)
++static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
++                       int addr_len, unsigned char addr_type)
+ {
+       struct netdev_hw_addr *ha;
+ 
+-      list_for_each_entry(ha, list, list) {
++      list_for_each_entry(ha, &list->list, list) {
+               if (!memcmp(ha->addr, addr, addr_len) &&
+                   (ha->type == addr_type || !addr_type)) {
+                       if (--ha->refcount)
+                               return 0;
+                       list_del_rcu(&ha->list);
+                       call_rcu(&ha->rcu_head, ha_rcu_free);
+-                      if (delta)
+-                              (*delta)--;
++                      list->count--;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+ }
+ 
+-static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
+-                                struct list_head *from_list, int addr_len,
++static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
++                                struct netdev_hw_addr_list *from_list,
++                                int addr_len,
+                                 unsigned char addr_type)
+ {
+       int err;
+       struct netdev_hw_addr *ha, *ha2;
+       unsigned char type;
+ 
+-      list_for_each_entry(ha, from_list, list) {
++      list_for_each_entry(ha, &from_list->list, list) {
+               type = addr_type ? addr_type : ha->type;
+-              err = __hw_addr_add(to_list, to_delta, ha->addr,
+-                                  addr_len, type);
++              err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+               if (err)
+                       goto unroll;
+       }
+       return 0;
+ 
+ unroll:
+-      list_for_each_entry(ha2, from_list, list) {
++      list_for_each_entry(ha2, &from_list->list, list) {
+               if (ha2 == ha)
+                       break;
+               type = addr_type ? addr_type : ha2->type;
+-              __hw_addr_del(to_list, to_delta, ha2->addr,
+-                            addr_len, type);
++              __hw_addr_del(to_list, ha2->addr, addr_len, type);
+       }
+       return err;
+ }
+ 
+-static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
+-                                 struct list_head *from_list, int addr_len,
++static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
++                                 struct netdev_hw_addr_list *from_list,
++                                 int addr_len,
+                                  unsigned char addr_type)
+ {
+       struct netdev_hw_addr *ha;
+       unsigned char type;
+ 
+-      list_for_each_entry(ha, from_list, list) {
++      list_for_each_entry(ha, &from_list->list, list) {
+               type = addr_type ? addr_type : ha->type;
+-              __hw_addr_del(to_list, to_delta, ha->addr,
+-                            addr_len, addr_type);
++              __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
+       }
+ }
+ 
+-static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
+-                        struct list_head *from_list, int *from_delta,
++static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
++                        struct netdev_hw_addr_list *from_list,
+                         int addr_len)
+ {
+       int err = 0;
+       struct netdev_hw_addr *ha, *tmp;
+ 
+-      list_for_each_entry_safe(ha, tmp, from_list, list) {
++      list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+               if (!ha->synced) {
+-                      err = __hw_addr_add(to_list, to_delta, ha->addr,
++                      err = __hw_addr_add(to_list, ha->addr,
+                                           addr_len, ha->type);
+                       if (err)
+                               break;
+                       ha->synced = true;
+                       ha->refcount++;
+               } else if (ha->refcount == 1) {
+-                      __hw_addr_del(to_list, to_delta, ha->addr,
+-                                    addr_len, ha->type);
+-                      __hw_addr_del(from_list, from_delta, ha->addr,
+-                                    addr_len, ha->type);
++                      __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
++                      __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
+               }
+       }
+       return err;
+ }
+ 
+-static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
+-                           struct list_head *from_list, int *from_delta,
++static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
++                           struct netdev_hw_addr_list *from_list,
+                            int addr_len)
+ {
+       struct netdev_hw_addr *ha, *tmp;
+ 
+-      list_for_each_entry_safe(ha, tmp, from_list, list) {
++      list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+               if (ha->synced) {
+-                      __hw_addr_del(to_list, to_delta, ha->addr,
++                      __hw_addr_del(to_list, ha->addr,
+                                     addr_len, ha->type);
+                       ha->synced = false;
+-                      __hw_addr_del(from_list, from_delta, ha->addr,
++                      __hw_addr_del(from_list, ha->addr,
+                                     addr_len, ha->type);
+               }
+       }
+ }
+ 
+-
+-static void __hw_addr_flush(struct list_head *list)
++static void __hw_addr_flush(struct netdev_hw_addr_list *list)
+ {
+       struct netdev_hw_addr *ha, *tmp;
+ 
+-      list_for_each_entry_safe(ha, tmp, list, list) {
++      list_for_each_entry_safe(ha, tmp, &list->list, list) {
+               list_del_rcu(&ha->list);
+               call_rcu(&ha->rcu_head, ha_rcu_free);
+       }
++      list->count = 0;
++}
++
++static void __hw_addr_init(struct netdev_hw_addr_list *list)
++{
++      INIT_LIST_HEAD(&list->list);
++      list->count = 0;
+ }
+ 
+ /* Device addresses handling functions */
+@@ -3564,7 +3563,7 @@
+ {
+       /* rtnl_mutex must be held here */
+ 
+-      __hw_addr_flush(&dev->dev_addr_list);
++      __hw_addr_flush(&dev->dev_addrs);
+       dev->dev_addr = NULL;
+ }
+ 
+@@ -3576,16 +3575,16 @@
+ 
+       /* rtnl_mutex must be held here */
+ 
+-      INIT_LIST_HEAD(&dev->dev_addr_list);
++      __hw_addr_init(&dev->dev_addrs);
+       memset(addr, 0, sizeof(addr));
+-      err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(addr),
++      err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
+                           NETDEV_HW_ADDR_T_LAN);
+       if (!err) {
+               /*
+                * Get the first (previously created) address from the list
+                * and set dev_addr pointer to this location.
+                */
+-              ha = list_first_entry(&dev->dev_addr_list,
++              ha = list_first_entry(&dev->dev_addrs.list,
+                                     struct netdev_hw_addr, list);
+               dev->dev_addr = ha->addr;
+       }
+@@ -3610,8 +3609,7 @@
+ 
+       ASSERT_RTNL();
+ 
+-      err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
+-                          addr_type);
++      err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       return err;
+@@ -3641,11 +3639,12 @@
+        * We can not remove the first address from the list because
+        * dev->dev_addr points to that.
+        */
+-      ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
++      ha = list_first_entry(&dev->dev_addrs.list,
++                            struct netdev_hw_addr, list);
+       if (ha->addr == dev->dev_addr && ha->refcount == 1)
+               return -ENOENT;
+ 
+-      err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
++      err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+                           addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+@@ -3673,8 +3672,7 @@
+ 
+       if (from_dev->addr_len != to_dev->addr_len)
+               return -EINVAL;
+-      err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
+-                                   &from_dev->dev_addr_list,
++      err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+                                    to_dev->addr_len, addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+@@ -3700,15 +3698,14 @@
+ 
+       if (from_dev->addr_len != to_dev->addr_len)
+               return -EINVAL;
+-      __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
+-                             &from_dev->dev_addr_list,
++      __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+                              to_dev->addr_len, addr_type);
+       call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+       return 0;
+ }
+ EXPORT_SYMBOL(dev_addr_del_multiple);
+ 
+-/* unicast and multicast addresses handling functions */
++/* multicast addresses handling functions */
+ 
+ int __dev_addr_delete(struct dev_addr_list **list, int *count,
+                     void *addr, int alen, int glbl)
+@@ -3784,8 +3781,8 @@
+ 
+       ASSERT_RTNL();
+ 
+-      err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
+-                          dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
++      err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
++                          NETDEV_HW_ADDR_T_UNICAST);
+       if (!err)
+               __dev_set_rx_mode(dev);
+       return err;
+@@ -3808,8 +3805,8 @@
+ 
+       ASSERT_RTNL();
+ 
+-      err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
+-                          dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
++      err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
++                          NETDEV_HW_ADDR_T_UNICAST);
+       if (!err)
+               __dev_set_rx_mode(dev);
+       return err;
+@@ -3882,8 +3879,7 @@
+       if (to->addr_len != from->addr_len)
+               return -EINVAL;
+ 
+-      err = __hw_addr_sync(&to->uc_list, &to->uc_count,
+-                           &from->uc_list, &from->uc_count, to->addr_len);
++      err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
+       if (!err)
+               __dev_set_rx_mode(to);
+       return err;
+@@ -3906,8 +3902,7 @@
+       if (to->addr_len != from->addr_len)
+               return;
+ 
+-      __hw_addr_unsync(&to->uc_list, &to->uc_count,
+-                       &from->uc_list, &from->uc_count, to->addr_len);
++      __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+       __dev_set_rx_mode(to);
+ }
+ EXPORT_SYMBOL(dev_unicast_unsync);
+@@ -3916,15 +3911,14 @@
+ {
+       /* rtnl_mutex must be held here */
+ 
+-      __hw_addr_flush(&dev->uc_list);
+-      dev->uc_count = 0;
++      __hw_addr_flush(&dev->uc);
+ }
+ 
+ static void dev_unicast_init(struct net_device *dev)
+ {
+       /* rtnl_mutex must be held here */
+ 
+-      INIT_LIST_HEAD(&dev->uc_list);
++      __hw_addr_init(&dev->uc);
+ }
+ 
+ 
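
Note on the hunks above: they track the upstream change that folds the open-coded
uc_list/uc_count pair into a struct netdev_hw_addr_list (dev->uc, dev->dev_addrs),
so a ported driver walks the embedded list head instead of keeping a separate
counter. A minimal sketch of the new-style traversal -- hypothetical driver code,
not part of the patch; program_rx_filter() is invented:

    struct netdev_hw_addr *ha;

    /* Walk the consolidated unicast list; ha->addr holds the hardware
     * address, dev->addr_len bytes long. */
    list_for_each_entry(ha, &dev->uc.list, list)
            program_rx_filter(hw, ha->addr);
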
diff --git a/debian/patches/libdde_mdio.patch b/debian/patches/libdde_mdio.patch
new file mode 100644
index 0000000..6c9b0c2
--- /dev/null
+++ b/debian/patches/libdde_mdio.patch
@@ -0,0 +1,359 @@
+--- /dev/null  2011-08-03 18:03:30.000000000 +0000
++++ b/libdde_linux26/contrib/include/linux/mdio.h      2012-04-15 22:16:31.000000000 +0000
+@@ -0,0 +1,356 @@
++/*
++ * linux/mdio.h: definitions for MDIO (clause 45) transceivers
++ * Copyright 2006-2009 Solarflare Communications Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ */
++
++#ifndef __LINUX_MDIO_H__
++#define __LINUX_MDIO_H__
++
++#include <linux/mii.h>
++
++/* MDIO Manageable Devices (MMDs). */
++#define MDIO_MMD_PMAPMD               1       /* Physical Medium Attachment/
++                                       * Physical Medium Dependent */
++#define MDIO_MMD_WIS          2       /* WAN Interface Sublayer */
++#define MDIO_MMD_PCS          3       /* Physical Coding Sublayer */
++#define MDIO_MMD_PHYXS                4       /* PHY Extender Sublayer */
++#define MDIO_MMD_DTEXS                5       /* DTE Extender Sublayer */
++#define MDIO_MMD_TC           6       /* Transmission Convergence */
++#define MDIO_MMD_AN           7       /* Auto-Negotiation */
++#define MDIO_MMD_C22EXT               29      /* Clause 22 extension */
++#define MDIO_MMD_VEND1                30      /* Vendor specific 1 */
++#define MDIO_MMD_VEND2                31      /* Vendor specific 2 */
++
++/* Generic MDIO registers. */
++#define MDIO_CTRL1            MII_BMCR
++#define MDIO_STAT1            MII_BMSR
++#define MDIO_DEVID1           MII_PHYSID1
++#define MDIO_DEVID2           MII_PHYSID2
++#define MDIO_SPEED            4       /* Speed ability */
++#define MDIO_DEVS1            5       /* Devices in package */
++#define MDIO_DEVS2            6
++#define MDIO_CTRL2            7       /* 10G control 2 */
++#define MDIO_STAT2            8       /* 10G status 2 */
++#define MDIO_PMA_TXDIS                9       /* 10G PMA/PMD transmit disable */
++#define MDIO_PMA_RXDET                10      /* 10G PMA/PMD receive signal detect */
++#define MDIO_PMA_EXTABLE      11      /* 10G PMA/PMD extended ability */
++#define MDIO_PKGID1           14      /* Package identifier */
++#define MDIO_PKGID2           15
++#define MDIO_AN_ADVERTISE     16      /* AN advertising (base page) */
++#define MDIO_AN_LPA           19      /* AN LP abilities (base page) */
++#define MDIO_PHYXS_LNSTAT     24      /* PHY XGXS lane state */
++
++/* Media-dependent registers. */
++#define MDIO_PMA_10GBT_SWAPPOL        130     /* 10GBASE-T pair swap & polarity */
++#define MDIO_PMA_10GBT_TXPWR  131     /* 10GBASE-T TX power control */
++#define MDIO_PMA_10GBT_SNR    133     /* 10GBASE-T SNR margin, lane A.
++                                       * Lanes B-D are numbered 134-136. */
++#define MDIO_PMA_10GBR_FECABLE        170     /* 10GBASE-R FEC ability */
++#define MDIO_PCS_10GBX_STAT1  24      /* 10GBASE-X PCS status 1 */
++#define MDIO_PCS_10GBRT_STAT1 32      /* 10GBASE-R/-T PCS status 1 */
++#define MDIO_PCS_10GBRT_STAT2 33      /* 10GBASE-R/-T PCS status 2 */
++#define MDIO_AN_10GBT_CTRL    32      /* 10GBASE-T auto-negotiation control */
++#define MDIO_AN_10GBT_STAT    33      /* 10GBASE-T auto-negotiation status */
++
++/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
++#define MDIO_PMA_LASI_RXCTRL  0x9000  /* RX_ALARM control */
++#define MDIO_PMA_LASI_TXCTRL  0x9001  /* TX_ALARM control */
++#define MDIO_PMA_LASI_CTRL    0x9002  /* LASI control */
++#define MDIO_PMA_LASI_RXSTAT  0x9003  /* RX_ALARM status */
++#define MDIO_PMA_LASI_TXSTAT  0x9004  /* TX_ALARM status */
++#define MDIO_PMA_LASI_STAT    0x9005  /* LASI status */
++
++/* Control register 1. */
++/* Enable extended speed selection */
++#define MDIO_CTRL1_SPEEDSELEXT                (BMCR_SPEED1000 | BMCR_SPEED100)
++/* All speed selection bits */
++#define MDIO_CTRL1_SPEEDSEL           (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
++#define MDIO_CTRL1_FULLDPLX           BMCR_FULLDPLX
++#define MDIO_CTRL1_LPOWER             BMCR_PDOWN
++#define MDIO_CTRL1_RESET              BMCR_RESET
++#define MDIO_PMA_CTRL1_LOOPBACK               0x0001
++#define MDIO_PMA_CTRL1_SPEED1000      BMCR_SPEED1000
++#define MDIO_PMA_CTRL1_SPEED100               BMCR_SPEED100
++#define MDIO_PCS_CTRL1_LOOPBACK               BMCR_LOOPBACK
++#define MDIO_PHYXS_CTRL1_LOOPBACK     BMCR_LOOPBACK
++#define MDIO_AN_CTRL1_RESTART         BMCR_ANRESTART
++#define MDIO_AN_CTRL1_ENABLE          BMCR_ANENABLE
++#define MDIO_AN_CTRL1_XNP             0x2000  /* Enable extended next page */
++
++/* 10 Gb/s */
++#define MDIO_CTRL1_SPEED10G           (MDIO_CTRL1_SPEEDSELEXT | 0x00)
++/* 10PASS-TS/2BASE-TL */
++#define MDIO_CTRL1_SPEED10P2B         (MDIO_CTRL1_SPEEDSELEXT | 0x04)
++
++/* Status register 1. */
++#define MDIO_STAT1_LPOWERABLE         0x0002  /* Low-power ability */
++#define MDIO_STAT1_LSTATUS            BMSR_LSTATUS
++#define MDIO_STAT1_FAULT              0x0080  /* Fault */
++#define MDIO_AN_STAT1_LPABLE          0x0001  /* Link partner AN ability */
++#define MDIO_AN_STAT1_ABLE            BMSR_ANEGCAPABLE
++#define MDIO_AN_STAT1_RFAULT          BMSR_RFAULT
++#define MDIO_AN_STAT1_COMPLETE                BMSR_ANEGCOMPLETE
++#define MDIO_AN_STAT1_PAGE            0x0040  /* Page received */
++#define MDIO_AN_STAT1_XNP             0x0080  /* Extended next page status */
++
++/* Speed register. */
++#define MDIO_SPEED_10G                        0x0001  /* 10G capable */
++#define MDIO_PMA_SPEED_2B             0x0002  /* 2BASE-TL capable */
++#define MDIO_PMA_SPEED_10P            0x0004  /* 10PASS-TS capable */
++#define MDIO_PMA_SPEED_1000           0x0010  /* 1000M capable */
++#define MDIO_PMA_SPEED_100            0x0020  /* 100M capable */
++#define MDIO_PMA_SPEED_10             0x0040  /* 10M capable */
++#define MDIO_PCS_SPEED_10P2B          0x0002  /* 10PASS-TS/2BASE-TL capable */
++
++/* Device present registers. */
++#define MDIO_DEVS_PRESENT(devad)      (1 << (devad))
++#define MDIO_DEVS_PMAPMD              MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD)
++#define MDIO_DEVS_WIS                 MDIO_DEVS_PRESENT(MDIO_MMD_WIS)
++#define MDIO_DEVS_PCS                 MDIO_DEVS_PRESENT(MDIO_MMD_PCS)
++#define MDIO_DEVS_PHYXS                       MDIO_DEVS_PRESENT(MDIO_MMD_PHYXS)
++#define MDIO_DEVS_DTEXS                       MDIO_DEVS_PRESENT(MDIO_MMD_DTEXS)
++#define MDIO_DEVS_TC                  MDIO_DEVS_PRESENT(MDIO_MMD_TC)
++#define MDIO_DEVS_AN                  MDIO_DEVS_PRESENT(MDIO_MMD_AN)
++#define MDIO_DEVS_C22EXT              MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT)
++
++/* Control register 2. */
++#define MDIO_PMA_CTRL2_TYPE           0x000f  /* PMA/PMD type selection */
++#define MDIO_PMA_CTRL2_10GBCX4                0x0000  /* 10GBASE-CX4 type */
++#define MDIO_PMA_CTRL2_10GBEW         0x0001  /* 10GBASE-EW type */
++#define MDIO_PMA_CTRL2_10GBLW         0x0002  /* 10GBASE-LW type */
++#define MDIO_PMA_CTRL2_10GBSW         0x0003  /* 10GBASE-SW type */
++#define MDIO_PMA_CTRL2_10GBLX4                0x0004  /* 10GBASE-LX4 type */
++#define MDIO_PMA_CTRL2_10GBER         0x0005  /* 10GBASE-ER type */
++#define MDIO_PMA_CTRL2_10GBLR         0x0006  /* 10GBASE-LR type */
++#define MDIO_PMA_CTRL2_10GBSR         0x0007  /* 10GBASE-SR type */
++#define MDIO_PMA_CTRL2_10GBLRM                0x0008  /* 10GBASE-LRM type */
++#define MDIO_PMA_CTRL2_10GBT          0x0009  /* 10GBASE-T type */
++#define MDIO_PMA_CTRL2_10GBKX4                0x000a  /* 10GBASE-KX4 type */
++#define MDIO_PMA_CTRL2_10GBKR         0x000b  /* 10GBASE-KR type */
++#define MDIO_PMA_CTRL2_1000BT         0x000c  /* 1000BASE-T type */
++#define MDIO_PMA_CTRL2_1000BKX                0x000d  /* 1000BASE-KX type */
++#define MDIO_PMA_CTRL2_100BTX         0x000e  /* 100BASE-TX type */
++#define MDIO_PMA_CTRL2_10BT           0x000f  /* 10BASE-T type */
++#define MDIO_PCS_CTRL2_TYPE           0x0003  /* PCS type selection */
++#define MDIO_PCS_CTRL2_10GBR          0x0000  /* 10GBASE-R type */
++#define MDIO_PCS_CTRL2_10GBX          0x0001  /* 10GBASE-X type */
++#define MDIO_PCS_CTRL2_10GBW          0x0002  /* 10GBASE-W type */
++#define MDIO_PCS_CTRL2_10GBT          0x0003  /* 10GBASE-T type */
++
++/* Status register 2. */
++#define MDIO_STAT2_RXFAULT            0x0400  /* Receive fault */
++#define MDIO_STAT2_TXFAULT            0x0800  /* Transmit fault */
++#define MDIO_STAT2_DEVPRST            0xc000  /* Device present */
++#define MDIO_STAT2_DEVPRST_VAL                0x8000  /* Device present value */
++#define MDIO_PMA_STAT2_LBABLE         0x0001  /* PMA loopback ability */
++#define MDIO_PMA_STAT2_10GBEW         0x0002  /* 10GBASE-EW ability */
++#define MDIO_PMA_STAT2_10GBLW         0x0004  /* 10GBASE-LW ability */
++#define MDIO_PMA_STAT2_10GBSW         0x0008  /* 10GBASE-SW ability */
++#define MDIO_PMA_STAT2_10GBLX4                0x0010  /* 10GBASE-LX4 ability */
++#define MDIO_PMA_STAT2_10GBER         0x0020  /* 10GBASE-ER ability */
++#define MDIO_PMA_STAT2_10GBLR         0x0040  /* 10GBASE-LR ability */
++#define MDIO_PMA_STAT2_10GBSR         0x0080  /* 10GBASE-SR ability */
++#define MDIO_PMD_STAT2_TXDISAB                0x0100  /* PMD TX disable ability */
++#define MDIO_PMA_STAT2_EXTABLE                0x0200  /* Extended abilities */
++#define MDIO_PMA_STAT2_RXFLTABLE      0x1000  /* Receive fault ability */
++#define MDIO_PMA_STAT2_TXFLTABLE      0x2000  /* Transmit fault ability */
++#define MDIO_PCS_STAT2_10GBR          0x0001  /* 10GBASE-R capable */
++#define MDIO_PCS_STAT2_10GBX          0x0002  /* 10GBASE-X capable */
++#define MDIO_PCS_STAT2_10GBW          0x0004  /* 10GBASE-W capable */
++#define MDIO_PCS_STAT2_RXFLTABLE      0x1000  /* Receive fault ability */
++#define MDIO_PCS_STAT2_TXFLTABLE      0x2000  /* Transmit fault ability */
++
++/* Transmit disable register. */
++#define MDIO_PMD_TXDIS_GLOBAL         0x0001  /* Global PMD TX disable */
++#define MDIO_PMD_TXDIS_0              0x0002  /* PMD TX disable 0 */
++#define MDIO_PMD_TXDIS_1              0x0004  /* PMD TX disable 1 */
++#define MDIO_PMD_TXDIS_2              0x0008  /* PMD TX disable 2 */
++#define MDIO_PMD_TXDIS_3              0x0010  /* PMD TX disable 3 */
++
++/* Receive signal detect register. */
++#define MDIO_PMD_RXDET_GLOBAL         0x0001  /* Global PMD RX signal detect */
++#define MDIO_PMD_RXDET_0              0x0002  /* PMD RX signal detect 0 */
++#define MDIO_PMD_RXDET_1              0x0004  /* PMD RX signal detect 1 */
++#define MDIO_PMD_RXDET_2              0x0008  /* PMD RX signal detect 2 */
++#define MDIO_PMD_RXDET_3              0x0010  /* PMD RX signal detect 3 */
++
++/* Extended abilities register. */
++#define MDIO_PMA_EXTABLE_10GCX4               0x0001  /* 10GBASE-CX4 ability */
++#define MDIO_PMA_EXTABLE_10GBLRM      0x0002  /* 10GBASE-LRM ability */
++#define MDIO_PMA_EXTABLE_10GBT                0x0004  /* 10GBASE-T ability */
++#define MDIO_PMA_EXTABLE_10GBKX4      0x0008  /* 10GBASE-KX4 ability */
++#define MDIO_PMA_EXTABLE_10GBKR               0x0010  /* 10GBASE-KR ability */
++#define MDIO_PMA_EXTABLE_1000BT               0x0020  /* 1000BASE-T ability */
++#define MDIO_PMA_EXTABLE_1000BKX      0x0040  /* 1000BASE-KX ability */
++#define MDIO_PMA_EXTABLE_100BTX               0x0080  /* 100BASE-TX ability */
++#define MDIO_PMA_EXTABLE_10BT         0x0100  /* 10BASE-T ability */
++
++/* PHY XGXS lane state register. */
++#define MDIO_PHYXS_LNSTAT_SYNC0               0x0001
++#define MDIO_PHYXS_LNSTAT_SYNC1               0x0002
++#define MDIO_PHYXS_LNSTAT_SYNC2               0x0004
++#define MDIO_PHYXS_LNSTAT_SYNC3               0x0008
++#define MDIO_PHYXS_LNSTAT_ALIGN               0x1000
++
++/* PMA 10GBASE-T pair swap & polarity */
++#define MDIO_PMA_10GBT_SWAPPOL_ABNX   0x0001  /* Pair A/B uncrossed */
++#define MDIO_PMA_10GBT_SWAPPOL_CDNX   0x0002  /* Pair C/D uncrossed */
++#define MDIO_PMA_10GBT_SWAPPOL_AREV   0x0100  /* Pair A polarity reversed */
++#define MDIO_PMA_10GBT_SWAPPOL_BREV   0x0200  /* Pair B polarity reversed */
++#define MDIO_PMA_10GBT_SWAPPOL_CREV   0x0400  /* Pair C polarity reversed */
++#define MDIO_PMA_10GBT_SWAPPOL_DREV   0x0800  /* Pair D polarity reversed */
++
++/* PMA 10GBASE-T TX power register. */
++#define MDIO_PMA_10GBT_TXPWR_SHORT    0x0001  /* Short-reach mode */
++
++/* PMA 10GBASE-T SNR registers. */
++/* Value is SNR margin in dB, clamped to range [-127, 127], plus 0x8000. */
++#define MDIO_PMA_10GBT_SNR_BIAS               0x8000
++#define MDIO_PMA_10GBT_SNR_MAX                127
++
++/* PMA 10GBASE-R FEC ability register. */
++#define MDIO_PMA_10GBR_FECABLE_ABLE   0x0001  /* FEC ability */
++#define MDIO_PMA_10GBR_FECABLE_ERRABLE        0x0002  /* FEC error indic. ability */
++
++/* PCS 10GBASE-R/-T status register 1. */
++#define MDIO_PCS_10GBRT_STAT1_BLKLK   0x0001  /* Block lock attained */
++
++/* PCS 10GBASE-R/-T status register 2. */
++#define MDIO_PCS_10GBRT_STAT2_ERR     0x00ff
++#define MDIO_PCS_10GBRT_STAT2_BER     0x3f00
++
++/* AN 10GBASE-T control register. */
++#define MDIO_AN_10GBT_CTRL_ADV10G     0x1000  /* Advertise 10GBASE-T */
++
++/* AN 10GBASE-T status register. */
++#define MDIO_AN_10GBT_STAT_LPTRR      0x0200  /* LP training reset req. */
++#define MDIO_AN_10GBT_STAT_LPLTABLE   0x0400  /* LP loop timing ability */
++#define MDIO_AN_10GBT_STAT_LP10G      0x0800  /* LP is 10GBT capable */
++#define MDIO_AN_10GBT_STAT_REMOK      0x1000  /* Remote OK */
++#define MDIO_AN_10GBT_STAT_LOCOK      0x2000  /* Local OK */
++#define MDIO_AN_10GBT_STAT_MS         0x4000  /* Master/slave config */
++#define MDIO_AN_10GBT_STAT_MSFLT      0x8000  /* Master/slave config fault */
++
++/* LASI RX_ALARM control/status registers. */
++#define MDIO_PMA_LASI_RX_PHYXSLFLT    0x0001  /* PHY XS RX local fault */
++#define MDIO_PMA_LASI_RX_PCSLFLT      0x0008  /* PCS RX local fault */
++#define MDIO_PMA_LASI_RX_PMALFLT      0x0010  /* PMA/PMD RX local fault */
++#define MDIO_PMA_LASI_RX_OPTICPOWERFLT        0x0020  /* RX optical power fault */
++#define MDIO_PMA_LASI_RX_WISLFLT      0x0200  /* WIS local fault */
++
++/* LASI TX_ALARM control/status registers. */
++#define MDIO_PMA_LASI_TX_PHYXSLFLT    0x0001  /* PHY XS TX local fault */
++#define MDIO_PMA_LASI_TX_PCSLFLT      0x0008  /* PCS TX local fault */
++#define MDIO_PMA_LASI_TX_PMALFLT      0x0010  /* PMA/PMD TX local fault */
++#define MDIO_PMA_LASI_TX_LASERPOWERFLT        0x0080  /* Laser output power fault */
++#define MDIO_PMA_LASI_TX_LASERTEMPFLT 0x0100  /* Laser temperature fault */
++#define MDIO_PMA_LASI_TX_LASERBICURRFLT       0x0200  /* Laser bias current fault */
++
++/* LASI control/status registers. */
++#define MDIO_PMA_LASI_LSALARM         0x0001  /* LS_ALARM enable/status */
++#define MDIO_PMA_LASI_TXALARM         0x0002  /* TX_ALARM enable/status */
++#define MDIO_PMA_LASI_RXALARM         0x0004  /* RX_ALARM enable/status */
++
++/* Mapping between MDIO PRTAD/DEVAD and mii_ioctl_data::phy_id */
++
++#define MDIO_PHY_ID_C45                       0x8000
++#define MDIO_PHY_ID_PRTAD             0x03e0
++#define MDIO_PHY_ID_DEVAD             0x001f
++#define MDIO_PHY_ID_C45_MASK                                          \
++      (MDIO_PHY_ID_C45 | MDIO_PHY_ID_PRTAD | MDIO_PHY_ID_DEVAD)
++
++static inline __u16 mdio_phy_id_c45(int prtad, int devad)
++{
++      return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
++}
++
++static inline bool mdio_phy_id_is_c45(int phy_id)
++{
++      return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
++}
++
++static inline __u16 mdio_phy_id_prtad(int phy_id)
++{
++      return (phy_id & MDIO_PHY_ID_PRTAD) >> 5;
++}
++
++static inline __u16 mdio_phy_id_devad(int phy_id)
++{
++      return phy_id & MDIO_PHY_ID_DEVAD;
++}
++
++#define MDIO_SUPPORTS_C22             1
++#define MDIO_SUPPORTS_C45             2
++
++#ifdef __KERNEL__ 
++
++/**
++ * struct mdio_if_info - Ethernet controller MDIO interface
++ * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown)
++ * @mmds: Mask of MMDs expected to be present in the PHY.  This must be
++ *    non-zero unless @prtad = %MDIO_PRTAD_NONE.
++ * @mode_support: MDIO modes supported.  If %MDIO_SUPPORTS_C22 is set then
++ *    MII register access will be passed through with @devad =
++ *    %MDIO_DEVAD_NONE.  If %MDIO_EMULATE_C22 is set then access to
++ *    commonly used clause 22 registers will be translated into
++ *    clause 45 registers.
++ * @dev: Net device structure
++ * @mdio_read: Register read function; returns value or negative error code
++ * @mdio_write: Register write function; returns 0 or negative error code
++ */
++struct mdio_if_info {
++      int prtad;
++      u32 mmds;
++      unsigned mode_support;
++
++      struct net_device *dev;
++      int (*mdio_read)(struct net_device *dev, int prtad, int devad,
++                       u16 addr);
++      int (*mdio_write)(struct net_device *dev, int prtad, int devad,
++                        u16 addr, u16 val);
++};
++
++#define MDIO_PRTAD_NONE                       (-1)
++#define MDIO_DEVAD_NONE                       (-1)
++#define MDIO_EMULATE_C22              4
++
++struct ethtool_cmd;
++struct ethtool_pauseparam;
++extern int mdio45_probe(struct mdio_if_info *mdio, int prtad);
++extern int mdio_set_flag(const struct mdio_if_info *mdio,
++                       int prtad, int devad, u16 addr, int mask,
++                       bool sense);
++extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
++extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
++extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
++                                    struct ethtool_cmd *ecmd,
++                                    u32 npage_adv, u32 npage_lpa);
++extern void
++mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
++                            const struct ethtool_pauseparam *ecmd);
++
++/**
++ * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
++ * @mdio: MDIO interface
++ * @ecmd: Ethtool request structure
++ *
++ * Since the CSRs for auto-negotiation using next pages are not fully
++ * standardised, this function does not attempt to decode them.  Use
++ * mdio45_ethtool_gset_npage() to specify advertisement bits from next
++ * pages.
++ */
++static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
++                                     struct ethtool_cmd *ecmd)
++{
++      mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
++}
++
++extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
++                        struct mii_ioctl_data *mii_data, int cmd);
++
++#endif /* __KERNEL__ */
++#endif /* __LINUX_MDIO_H__ */
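
The PRTAD/DEVAD packing at the end of the header is plain bit arithmetic, so a
worked round-trip through the helpers may help when reading driver ioctl paths
(illustrative, not part of the patch):

    __u16 id = mdio_phy_id_c45(4, MDIO_MMD_PMAPMD);
    /* id == 0x8000 | (4 << 5) | 1 == 0x8081                               */
    /* mdio_phy_id_is_c45(id) -> true: C45 bit set, no bits outside mask   */
    /* mdio_phy_id_prtad(id)  -> (0x8081 & 0x03e0) >> 5 == 4               */
    /* mdio_phy_id_devad(id)  ->  0x8081 & 0x001f == 1 == MDIO_MMD_PMAPMD  */
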
diff --git a/debian/patches/libdde_netdev_tx_t.patch b/debian/patches/libdde_netdev_tx_t.patch
new file mode 100644
index 0000000..73ff710
--- /dev/null
+++ b/debian/patches/libdde_netdev_tx_t.patch
@@ -0,0 +1,66 @@
+commit dc1f8bf68b311b1537cb65893430b6796118498a
+Author: Stephen Hemminger <address@hidden>
+Date:   Mon Aug 31 19:50:40 2009 +0000
+
+    netdev: change transmit to limited range type
+    
+    The transmit function should only return one of three possible values;
+    some drivers got confused and returned errnos or other values.
+    This changes the definition so that this can be caught at compile time.
+    
+    Signed-off-by: Stephen Hemminger <address@hidden>
+    Signed-off-by: David S. Miller <address@hidden>
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 60d3aac..376a2e1 100644
+--- a/libdde_linux26/contrib/include/linux/netdevice.h
++++ b/libdde_linux26/contrib/include/linux/netdevice.h
+@@ -79,17 +79,19 @@ struct wireless_dev;
+ #define net_xmit_eval(e)      ((e) == NET_XMIT_CN? 0 : (e))
+ #define net_xmit_errno(e)     ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
+ 
++/* Driver transmit return codes */
++enum netdev_tx {
++      NETDEV_TX_OK = 0,       /* driver took care of packet */
++      NETDEV_TX_BUSY,         /* driver tx path was busy*/
++      NETDEV_TX_LOCKED = -1,  /* driver tx lock was already taken */
++};
++typedef enum netdev_tx netdev_tx_t;
++
+ #endif
+ 
+ #define MAX_ADDR_LEN  32              /* Largest hardware address length */
+ 
+-/* Driver transmit return codes */
+-#define NETDEV_TX_OK 0                /* driver took care of packet */
+-#define NETDEV_TX_BUSY 1      /* driver tx path was busy*/
+-#define NETDEV_TX_LOCKED -1   /* driver tx lock was already taken */
+-
+ #ifdef  __KERNEL__
+-
+ /*
+  *    Compute the worst case header length according to the protocols
+  *    used.
+@@ -507,9 +509,11 @@ struct netdev_queue {
+  *     This function is called when network device transistions to the down
+  *     state.
+  *
+- * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
++ * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
++ *                               struct net_device *dev);
+  *    Called when a packet needs to be transmitted.
+- *    Must return NETDEV_TX_OK , NETDEV_TX_BUSY, or NETDEV_TX_LOCKED,
++ *    Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
++ *        (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
+  *    Required can not be NULL.
+  *
+  * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+@@ -580,7 +584,7 @@ struct net_device_ops {
+       void                    (*ndo_uninit)(struct net_device *dev);
+       int                     (*ndo_open)(struct net_device *dev);
+       int                     (*ndo_stop)(struct net_device *dev);
+-      int                     (*ndo_start_xmit) (struct sk_buff *skb,
++      netdev_tx_t             (*ndo_start_xmit) (struct sk_buff *skb,
+                                                  struct net_device *dev);
+       u16                     (*ndo_select_queue)(struct net_device *dev,
+                                                   struct sk_buff *skb);
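
With ndo_start_xmit typed as netdev_tx_t, sparse and the compiler can flag
drivers that still return errnos from their transmit path. A minimal
conforming transmit routine might look like this (hypothetical driver;
my_hw_queue_full() and my_hw_xmit() are invented):

    static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (my_hw_queue_full(dev)) {    /* my_hw_queue_full() is invented */
                    netif_stop_queue(dev);
                    return NETDEV_TX_BUSY;  /* core will requeue the skb */
            }
            my_hw_xmit(dev, skb);           /* my_hw_xmit() is invented */
            return NETDEV_TX_OK;            /* driver took care of the packet */
    }
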
diff --git a/debian/patches/libdde_pci-needs_freset.patch b/debian/patches/libdde_pci-needs_freset.patch
new file mode 100644
index 0000000..cc1f6ae
--- /dev/null
+++ b/debian/patches/libdde_pci-needs_freset.patch
@@ -0,0 +1,39 @@
+commit 260d703adc5f275e3ba7ddff6e2e0217bc613b35
+Author: Mike Mason <address@hidden>
+Date:   Thu Jul 30 15:33:21 2009 -0700
+
+    PCI: support for PCI Express fundamental reset
+    
+    This is the first of three patches that implement a bit field that PCI
+    Express device drivers can use to indicate they need a fundamental reset
+    during error recovery.
+    
+    By default, the EEH framework on powerpc does what's known as a "hot
+    reset" during recovery of a PCI Express device.  We've found a case
+    where the device needs a "fundamental reset" to recover properly.  The
+    current PCI error recovery and EEH frameworks do not support this
+    distinction.
+    
+    The attached patch (courtesy of Richard Lary) adds a bit field to
+    pci_dev that indicates whether the device requires a fundamental reset
+    during recovery.
+    
+    These patches supersede the previously submitted patch that implemented
+    a fundamental reset bit field.
+    
+    Signed-off-by: Mike Mason <address@hidden>
+    Signed-off-by: Richard Lary <address@hidden>
+    Signed-off-by: Jesse Barnes <address@hidden>
+
+Index: hurd-debian/libdde_linux26/contrib/include/linux/pci.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/pci.h        2012-04-16 00:26:44.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/pci.h     2012-04-16 00:34:37.000000000 +0000
+@@ -256,6 +256,7 @@
+       unsigned int    ari_enabled:1;  /* ARI forwarding */
+       unsigned int    is_managed:1;
+       unsigned int    is_pcie:1;
++      unsigned int    needs_freset:1; /* Dev requires fundamental reset */
+       unsigned int    state_saved:1;
+       pci_dev_flags_t dev_flags;
+       atomic_t        enable_cnt;     /* pci_enable_device has been called */
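
The new bit is consumed by the powerpc EEH recovery code; a PCIe driver whose
device cannot survive a hot reset sets it once at probe time. A sketch
(hypothetical probe function, not from the patch):

    static int __devinit my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            pdev->needs_freset = 1; /* ask EEH for a fundamental reset on recovery */
            return pci_enable_device(pdev);
    }
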
diff --git a/debian/patches/libdde_pci_ids.h.patch b/debian/patches/libdde_pci_ids.h.patch
new file mode 100644
index 0000000..df63d98
--- /dev/null
+++ b/debian/patches/libdde_pci_ids.h.patch
@@ -0,0 +1,515 @@
+--- a/libdde_linux26/contrib/include/linux/pci_ids.h   2012-04-15 20:31:32.000000000 +0000
++++ b/libdde_linux26/contrib/include/linux/pci_ids.h   2012-03-17 10:14:52.000000000 +0000
+@@ -2,6 +2,9 @@
+  *    PCI Class, Vendor and Device IDs
+  *
+  *    Please keep sorted.
++ *
++ *    Do not add new entries to this file unless the definitions
++ *    are shared between multiple drivers.
+  */
+ 
+ /* Device classes and subclasses */
+@@ -104,6 +107,7 @@
+ #define PCI_CLASS_SERIAL_USB_UHCI     0x0c0300
+ #define PCI_CLASS_SERIAL_USB_OHCI     0x0c0310
+ #define PCI_CLASS_SERIAL_USB_EHCI     0x0c0320
++#define PCI_CLASS_SERIAL_USB_XHCI     0x0c0330
+ #define PCI_CLASS_SERIAL_FIBER                0x0c04
+ #define PCI_CLASS_SERIAL_SMBUS                0x0c05
+ 
+@@ -389,6 +393,9 @@
+ #define PCI_DEVICE_ID_VLSI_82C147     0x0105
+ #define PCI_DEVICE_ID_VLSI_VAS96011   0x0702
+ 
++/* AMD RD890 Chipset */
++#define PCI_DEVICE_ID_RD890_IOMMU     0x5a23
++
+ #define PCI_VENDOR_ID_ADL             0x1005
+ #define PCI_DEVICE_ID_ADL_2301                0x2301
+ 
+@@ -478,6 +485,9 @@
+ #define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
+ #define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL        0x252
+ 
++#define PCI_SUBVENDOR_ID_IBM          0x1014
++#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT   0x03d4
++
+ #define PCI_VENDOR_ID_UNISYS          0x1018
+ #define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C
+ 
+@@ -526,6 +536,7 @@
+ #define PCI_DEVICE_ID_AMD_OPUS_7443   0x7443
+ #define PCI_DEVICE_ID_AMD_VIPER_7443  0x7443
+ #define PCI_DEVICE_ID_AMD_OPUS_7445   0x7445
++#define PCI_DEVICE_ID_AMD_8111_PCI    0x7460
+ #define PCI_DEVICE_ID_AMD_8111_LPC    0x7468
+ #define PCI_DEVICE_ID_AMD_8111_IDE    0x7469
+ #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
+@@ -535,6 +546,8 @@
+ #define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
+ #define PCI_DEVICE_ID_AMD_8131_APIC   0x7451
+ #define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
++#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS       0x780b
++#define PCI_DEVICE_ID_AMD_CS5535_IDE    0x208F
+ #define PCI_DEVICE_ID_AMD_CS5536_ISA    0x2090
+ #define PCI_DEVICE_ID_AMD_CS5536_FLASH  0x2091
+ #define PCI_DEVICE_ID_AMD_CS5536_AUDIO  0x2093
+@@ -543,9 +556,10 @@
+ #define PCI_DEVICE_ID_AMD_CS5536_UDC    0x2096
+ #define PCI_DEVICE_ID_AMD_CS5536_UOC    0x2097
+ #define PCI_DEVICE_ID_AMD_CS5536_IDE    0x209A
+-
+ #define PCI_DEVICE_ID_AMD_LX_VIDEO  0x2081
+ #define PCI_DEVICE_ID_AMD_LX_AES    0x2082
++#define PCI_DEVICE_ID_AMD_HUDSON2_IDE         0x780c
++#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE    0x7800
+ 
+ #define PCI_VENDOR_ID_TRIDENT         0x1023
+ #define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX       0x2000
+@@ -591,6 +605,8 @@
+ #define PCI_DEVICE_ID_MATROX_G550     0x2527
+ #define PCI_DEVICE_ID_MATROX_VIA      0x4536
+ 
++#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS    0x14f2
++
+ #define PCI_VENDOR_ID_CT              0x102c
+ #define PCI_DEVICE_ID_CT_69000                0x00c0
+ #define PCI_DEVICE_ID_CT_65545                0x00d8
+@@ -766,6 +782,7 @@
+ #define PCI_DEVICE_ID_TI_X515         0x8036
+ #define PCI_DEVICE_ID_TI_XX12         0x8039
+ #define PCI_DEVICE_ID_TI_XX12_FM      0x803b
++#define PCI_DEVICE_ID_TI_XIO2000A     0x8231
+ #define PCI_DEVICE_ID_TI_1130         0xac12
+ #define PCI_DEVICE_ID_TI_1031         0xac13
+ #define PCI_DEVICE_ID_TI_1131         0xac15
+@@ -834,6 +851,8 @@
+ #define PCI_DEVICE_ID_PROMISE_20276   0x5275
+ #define PCI_DEVICE_ID_PROMISE_20277   0x7275
+ 
++#define PCI_VENDOR_ID_FOXCONN         0x105b
++
+ #define PCI_VENDOR_ID_UMC             0x1060
+ #define PCI_DEVICE_ID_UMC_UM8673F     0x0101
+ #define PCI_DEVICE_ID_UMC_UM8886BF    0x673a
+@@ -873,6 +892,7 @@
+ #define PCI_DEVICE_ID_APPLE_SH_SUNGEM   0x0051
+ #define PCI_DEVICE_ID_APPLE_U3L_AGP   0x0058
+ #define PCI_DEVICE_ID_APPLE_U3H_AGP   0x0059
++#define PCI_DEVICE_ID_APPLE_U4_PCIE   0x005b
+ #define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066
+ #define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069
+ #define PCI_DEVICE_ID_APPLE_IPID2_FW  0x006a
+@@ -941,6 +961,32 @@
+ #define PCI_DEVICE_ID_SUN_TOMATILLO   0xa801
+ #define PCI_DEVICE_ID_SUN_CASSINI     0xabba
+ 
++#define PCI_VENDOR_ID_NI              0x1093
++#define PCI_DEVICE_ID_NI_PCI2322      0xd130
++#define PCI_DEVICE_ID_NI_PCI2324      0xd140
++#define PCI_DEVICE_ID_NI_PCI2328      0xd150
++#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190
++#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0
++#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0
++#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0
++#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0
++#define PCI_DEVICE_ID_NI_PXI8420_23216        0xd1f1
++#define PCI_DEVICE_ID_NI_PCI2322I     0xd250
++#define PCI_DEVICE_ID_NI_PCI2324I     0xd270
++#define PCI_DEVICE_ID_NI_PCI23216     0xd2b0
++#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080
++#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db
++#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd
++#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df
++#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2
++#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4
++#define PCI_DEVICE_ID_NI_PXI8430_23216        0x70e6
++#define PCI_DEVICE_ID_NI_PCI8430_23216        0x70e7
++#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8
++#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea
++#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec
++#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee
++
+ #define PCI_VENDOR_ID_CMD             0x1095
+ #define PCI_DEVICE_ID_CMD_643         0x0643
+ #define PCI_DEVICE_ID_CMD_646         0x0646
+@@ -976,6 +1022,7 @@
+ #define PCI_DEVICE_ID_PLX_PCI200SYN   0x3196
+ #define PCI_DEVICE_ID_PLX_9030          0x9030
+ #define PCI_DEVICE_ID_PLX_9050                0x9050
++#define PCI_DEVICE_ID_PLX_9056                0x9056
+ #define PCI_DEVICE_ID_PLX_9080                0x9080
+ #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2        0xa001
+ 
+@@ -1037,8 +1084,6 @@
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS       0x0034
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA        0x0036
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_10                0x0037
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_11                0x0038
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2       0x003e
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800       0x0041
+@@ -1049,21 +1094,16 @@
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA        0x0054
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2       0x0055
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_8         0x0056
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_9         0x0057
+ #define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO      0x0059
+ #define PCI_DEVICE_ID_NVIDIA_CK804_PCIE               0x005d
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS    0x0064
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE      0x0065
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_2         0x0066
+ #define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM               0x0069
+ #define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO               0x006a
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS   0x0084
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE     0x0085
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_4         0x0086
+ #define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM      0x0089
+ #define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO                0x008a
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_5         0x008c
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA    0x008e
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT   0x0090
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091
+@@ -1079,15 +1119,12 @@
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3          0x00d1
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS    0x00d4
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE      0x00d5
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_3         0x00d6
+ #define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM               0x00d9
+ #define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO               0x00da
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_7         0x00df
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3S         0x00e1
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA    0x00e3
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS   0x00e4
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE     0x00e5
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_6         0x00e6
+ #define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO               0x00ea
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2   0x00ee
+ #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0
+@@ -1147,7 +1184,6 @@
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS     0x01b4
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE               0x01bc
+ #define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM               0x01c1
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_1         0x01c3
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE2          0x01e0
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE3         0x0200
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1               0x0201
+@@ -1170,8 +1206,6 @@
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA        0x037E
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2       0x037F
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_12                0x0268
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_13                0x0269
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X    0x0281
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE     0x0282
+@@ -1218,42 +1252,22 @@
+ #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2    0x0348
+ #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000       0x034C
+ #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100         0x034E
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_14              0x0372
+ #define PCI_DEVICE_ID_NVIDIA_NVENET_15              0x0373
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_16              0x03E5
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_17              0x03E6
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA      0x03E7
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS           0x03EB
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE       0x03EC
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_18              0x03EE
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_19              0x03EF
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2     0x03F6
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3     0x03F7
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS           0x0446
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE     0x0448
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_20              0x0450
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_21              0x0451
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_22              0x0452
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_23              0x0453
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_24              0x054C
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_25              0x054D
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_26              0x054E
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_27              0x054F
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_28              0x07DC
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_29              0x07DD
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_30              0x07DE
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_31              0x07DF
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS     0x0542
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE       0x0560
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE       0x056C
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS    0x0752
+ #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE       0x0759
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_32              0x0760
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_33              0x0761
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_34              0x0762
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_35              0x0763
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_36              0x0AB0
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_37              0x0AB1
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_38              0x0AB2
+-#define PCI_DEVICE_ID_NVIDIA_NVENET_39              0x0AB3
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS     0x07D8
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS     0x0AA2
++#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA            0x0D85
+ 
+ #define PCI_VENDOR_ID_IMS             0x10e0
+ #define PCI_DEVICE_ID_IMS_TT128               0x9128
+@@ -1281,6 +1295,13 @@
+ 
+ #define PCI_VENDOR_ID_CREATIVE                0x1102 /* duplicate: ECTIVA */
+ #define PCI_DEVICE_ID_CREATIVE_EMU10K1        0x0002
++#define PCI_DEVICE_ID_CREATIVE_20K1   0x0005
++#define PCI_DEVICE_ID_CREATIVE_20K2   0x000b
++#define PCI_SUBDEVICE_ID_CREATIVE_SB0760      0x0024
++#define PCI_SUBDEVICE_ID_CREATIVE_SB08801     0x0041
++#define PCI_SUBDEVICE_ID_CREATIVE_SB08802     0x0042
++#define PCI_SUBDEVICE_ID_CREATIVE_SB08803     0x0043
++#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX     0x6000
+ 
+ #define PCI_VENDOR_ID_ECTIVA          0x1102 /* duplicate: CREATIVE */
+ #define PCI_DEVICE_ID_ECTIVA_EV1938   0x8938
+@@ -1373,7 +1394,7 @@
+ #define PCI_DEVICE_ID_VIA_82C598_1    0x8598
+ #define PCI_DEVICE_ID_VIA_838X_1      0xB188
+ #define PCI_DEVICE_ID_VIA_83_87XX_1   0xB198
+-#define PCI_DEVICE_ID_VIA_C409_IDE    0XC409
++#define PCI_DEVICE_ID_VIA_VX855_IDE   0xC409
+ #define PCI_DEVICE_ID_VIA_ANON                0xFFFF
+ 
+ #define PCI_VENDOR_ID_SIEMENS           0x110A
+@@ -1473,6 +1494,7 @@
+ #define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
+ #define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
+ #define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
++#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408
+ 
+ #define PCI_VENDOR_ID_SBE             0x1176
+ #define PCI_DEVICE_ID_SBE_WANXL100    0x0301
+@@ -1516,6 +1538,8 @@
+ #define PCI_DEVICE_ID_ARTOP_ATP860R   0x0007
+ #define PCI_DEVICE_ID_ARTOP_ATP865    0x0008
+ #define PCI_DEVICE_ID_ARTOP_ATP865R   0x0009
++#define PCI_DEVICE_ID_ARTOP_ATP867A   0x000A
++#define PCI_DEVICE_ID_ARTOP_ATP867B   0x000B
+ #define PCI_DEVICE_ID_ARTOP_AEC7610   0x8002
+ #define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010
+ #define PCI_DEVICE_ID_ARTOP_AEC7612U  0x8020
+@@ -1813,6 +1837,10 @@
+ #define PCI_SUBDEVICE_ID_HYPERCOPE_METRO      0x0107
+ #define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2     0x0108
+ 
++#define PCI_VENDOR_ID_DIGIGRAM                0x1369
++#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM   0xc001
++#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM       0xc002
++
+ #define PCI_VENDOR_ID_KAWASAKI                0x136b
+ #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
+ 
+@@ -1880,6 +1908,8 @@
+ #define PCI_SUBDEVICE_ID_CCD_SWYX4S   0xB540
+ #define PCI_SUBDEVICE_ID_CCD_JH4S20   0xB550
+ #define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552
++#define PCI_SUBDEVICE_ID_CCD_JHSE1    0xB553
++#define PCI_SUBDEVICE_ID_CCD_JH8S     0xB55B
+ #define PCI_SUBDEVICE_ID_CCD_BN4S     0xB560
+ #define PCI_SUBDEVICE_ID_CCD_BN8S     0xB562
+ #define PCI_SUBDEVICE_ID_CCD_BNE1     0xB563
+@@ -1932,6 +1962,8 @@
+ #define PCI_DEVICE_ID_LAVA_DSERIAL    0x0100 /* 2x 16550 */
+ #define PCI_DEVICE_ID_LAVA_QUATRO_A   0x0101 /* 2x 16550, half of 4 port */
+ #define PCI_DEVICE_ID_LAVA_QUATRO_B   0x0102 /* 2x 16550, half of 4 port */
++#define PCI_DEVICE_ID_LAVA_QUATTRO_A  0x0120 /* 2x 16550A, half of 4 port */
++#define PCI_DEVICE_ID_LAVA_QUATTRO_B  0x0121 /* 2x 16550A, half of 4 port */
+ #define PCI_DEVICE_ID_LAVA_OCTO_A     0x0180 /* 4x 16550A, half of 8 port */
+ #define PCI_DEVICE_ID_LAVA_OCTO_B     0x0181 /* 4x 16550A, half of 8 port */
+ #define PCI_DEVICE_ID_LAVA_PORT_PLUS  0x0200 /* 2x 16650 */
+@@ -1962,15 +1994,21 @@
+ #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U      0xC118
+ #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU     0xC11C
+ #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
++#define PCI_DEVICE_ID_OXSEMI_C950     0x950B
+ #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
+ #define PCI_DEVICE_ID_OXSEMI_16PCI954PP       0x9513
+ #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
+ #define PCI_DEVICE_ID_OXSEMI_16PCI952PP       0x9523
++#define PCI_SUBDEVICE_ID_OXSEMI_C950  0x0001
+ 
+ #define PCI_VENDOR_ID_CHELSIO         0x1425
+ 
+ #define PCI_VENDOR_ID_SAMSUNG         0x144d
+ 
++#define PCI_VENDOR_ID_GIGABYTE                0x1458
++
++#define PCI_VENDOR_ID_AMBIT           0x1468
++
+ #define PCI_VENDOR_ID_MYRICOM         0x14c1
+ 
+ #define PCI_VENDOR_ID_TITAN           0x14D2
+@@ -1998,6 +2036,7 @@
+ #define PCI_DEVICE_ID_AFAVLAB_P030    0x2182
+ #define PCI_SUBDEVICE_ID_AFAVLAB_P061         0x2150
+ 
++#define PCI_VENDOR_ID_BCM_GVC          0x14a4
+ #define PCI_VENDOR_ID_BROADCOM                0x14e4
+ #define PCI_DEVICE_ID_TIGON3_5752     0x1600
+ #define PCI_DEVICE_ID_TIGON3_5752M    0x1601
+@@ -2047,7 +2086,6 @@
+ #define PCI_DEVICE_ID_TIGON3_5787M    0x1693
+ #define PCI_DEVICE_ID_TIGON3_5782     0x1696
+ #define PCI_DEVICE_ID_TIGON3_5784     0x1698
+-#define PCI_DEVICE_ID_TIGON3_5785     0x1699
+ #define PCI_DEVICE_ID_TIGON3_5786     0x169a
+ #define PCI_DEVICE_ID_TIGON3_5787     0x169b
+ #define PCI_DEVICE_ID_TIGON3_5788     0x169c
+@@ -2077,6 +2115,7 @@
+ #define PCI_VENDOR_ID_MAINPINE                0x1522
+ #define PCI_DEVICE_ID_MAINPINE_PBRIDGE        0x0100
+ #define PCI_VENDOR_ID_ENE             0x1524
++#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510
+ #define PCI_DEVICE_ID_ENE_CB712_SD    0x0550
+ #define PCI_DEVICE_ID_ENE_CB712_SD_2  0x0551
+ #define PCI_DEVICE_ID_ENE_CB714_SD    0x0750
+@@ -2112,6 +2151,8 @@
+ #define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
+ #define PCI_DEVICE_ID_MELLANOX_SINAI  0x6274
+ 
++#define PCI_VENDOR_ID_DFI             0x15bd
++
+ #define PCI_VENDOR_ID_QUICKNET                0x15e2
+ #define PCI_DEVICE_ID_QUICKNET_XJ     0x0500
+ 
+@@ -2131,6 +2172,10 @@
+ #define PCI_DEVICE_ID_ADDIDATA_APCI7420_3      0x700D
+ #define PCI_DEVICE_ID_ADDIDATA_APCI7300_3      0x700E
+ #define PCI_DEVICE_ID_ADDIDATA_APCI7800_3      0x700F
++#define PCI_DEVICE_ID_ADDIDATA_APCIe7300       0x7010
++#define PCI_DEVICE_ID_ADDIDATA_APCIe7420       0x7011
++#define PCI_DEVICE_ID_ADDIDATA_APCIe7500       0x7012
++#define PCI_DEVICE_ID_ADDIDATA_APCIe7800       0x7013
+ 
+ #define PCI_VENDOR_ID_PDC             0x15e9
+ 
+@@ -2215,10 +2260,20 @@
+ 
+ #define PCI_VENDOR_ID_TOPSPIN         0x1867
+ 
++#define PCI_VENDOR_ID_SILAN           0x1904
++
+ #define PCI_VENDOR_ID_TDI               0x192E
+ #define PCI_DEVICE_ID_TDI_EHCI          0x0101
+ 
+ #define PCI_VENDOR_ID_FREESCALE               0x1957
++#define PCI_DEVICE_ID_MPC8315E                0x00b4
++#define PCI_DEVICE_ID_MPC8315         0x00b5
++#define PCI_DEVICE_ID_MPC8314E                0x00b6
++#define PCI_DEVICE_ID_MPC8314         0x00b7
++#define PCI_DEVICE_ID_MPC8378E                0x00c4
++#define PCI_DEVICE_ID_MPC8378         0x00c5
++#define PCI_DEVICE_ID_MPC8377E                0x00c6
++#define PCI_DEVICE_ID_MPC8377         0x00c7
+ #define PCI_DEVICE_ID_MPC8548E                0x0012
+ #define PCI_DEVICE_ID_MPC8548         0x0013
+ #define PCI_DEVICE_ID_MPC8543E                0x0014
+@@ -2226,6 +2281,8 @@
+ #define PCI_DEVICE_ID_MPC8547E                0x0018
+ #define PCI_DEVICE_ID_MPC8545E                0x0019
+ #define PCI_DEVICE_ID_MPC8545         0x001a
++#define PCI_DEVICE_ID_MPC8569E                0x0061
++#define PCI_DEVICE_ID_MPC8569         0x0060
+ #define PCI_DEVICE_ID_MPC8568E                0x0020
+ #define PCI_DEVICE_ID_MPC8568         0x0021
+ #define PCI_DEVICE_ID_MPC8567E                0x0022
+@@ -2238,6 +2295,22 @@
+ #define PCI_DEVICE_ID_MPC8572         0x0041
+ #define PCI_DEVICE_ID_MPC8536E                0x0050
+ #define PCI_DEVICE_ID_MPC8536         0x0051
++#define PCI_DEVICE_ID_P2020E          0x0070
++#define PCI_DEVICE_ID_P2020           0x0071
++#define PCI_DEVICE_ID_P2010E          0x0078
++#define PCI_DEVICE_ID_P2010           0x0079
++#define PCI_DEVICE_ID_P1020E          0x0100
++#define PCI_DEVICE_ID_P1020           0x0101
++#define PCI_DEVICE_ID_P1011E          0x0108
++#define PCI_DEVICE_ID_P1011           0x0109
++#define PCI_DEVICE_ID_P1022E          0x0110
++#define PCI_DEVICE_ID_P1022           0x0111
++#define PCI_DEVICE_ID_P1013E          0x0118
++#define PCI_DEVICE_ID_P1013           0x0119
++#define PCI_DEVICE_ID_P4080E          0x0400
++#define PCI_DEVICE_ID_P4080           0x0401
++#define PCI_DEVICE_ID_P4040E          0x0408
++#define PCI_DEVICE_ID_P4040           0x0409
+ #define PCI_DEVICE_ID_MPC8641         0x7010
+ #define PCI_DEVICE_ID_MPC8641D                0x7011
+ #define PCI_DEVICE_ID_MPC8610         0x7018
+@@ -2251,6 +2324,7 @@
+ #define PCI_VENDOR_ID_JMICRON         0x197B
+ #define PCI_DEVICE_ID_JMICRON_JMB360  0x2360
+ #define PCI_DEVICE_ID_JMICRON_JMB361  0x2361
++#define PCI_DEVICE_ID_JMICRON_JMB362  0x2362
+ #define PCI_DEVICE_ID_JMICRON_JMB363  0x2363
+ #define PCI_DEVICE_ID_JMICRON_JMB365  0x2365
+ #define PCI_DEVICE_ID_JMICRON_JMB366  0x2366
+@@ -2263,6 +2337,10 @@
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF0       0x1600
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF1       0x16ff
+ 
++#define PCI_VENDOR_ID_QMI             0x1a32
++
++#define PCI_VENDOR_ID_AZWAVE          0x1a3b
++
+ #define PCI_VENDOR_ID_TEKRAM          0x1de1
+ #define PCI_DEVICE_ID_TEKRAM_DC290    0xdc29
+ 
+@@ -2342,6 +2420,9 @@
+ #define PCI_DEVICE_ID_INTEL_82840_HB  0x1a21
+ #define PCI_DEVICE_ID_INTEL_82845_HB  0x1a30
+ #define PCI_DEVICE_ID_INTEL_IOAT      0x1a38
++#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
++#define PCI_DEVICE_ID_INTEL_CPT_LPC1  0x1c42
++#define PCI_DEVICE_ID_INTEL_CPT_LPC2  0x1c43
+ #define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
+ #define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
+ #define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
+@@ -2373,6 +2454,7 @@
+ #define PCI_DEVICE_ID_INTEL_82801CA_12        0x248c
+ #define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
+ #define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
++#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2
+ #define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
+ #define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
+ #define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
+@@ -2463,6 +2545,8 @@
+ #define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
+ #define PCI_DEVICE_ID_INTEL_82830_HB  0x3575
+ #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
++#define PCI_DEVICE_ID_INTEL_82854_HB  0x358c
++#define PCI_DEVICE_ID_INTEL_82854_IG  0x358e
+ #define PCI_DEVICE_ID_INTEL_82855GM_HB        0x3580
+ #define PCI_DEVICE_ID_INTEL_82855GM_IG        0x3582
+ #define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
+@@ -2476,6 +2560,16 @@
+ #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+ #define PCI_DEVICE_ID_INTEL_IOAT_CNB  0x360b
+ #define PCI_DEVICE_ID_INTEL_FBD_CNB   0x360c
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
++#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
+ #define PCI_DEVICE_ID_INTEL_ICH10_0   0x3a14
+ #define PCI_DEVICE_ID_INTEL_ICH10_1   0x3a16
+ #define PCI_DEVICE_ID_INTEL_ICH10_2   0x3a18
+@@ -2606,6 +2700,7 @@
+ #define PCI_DEVICE_ID_NETMOS_9835     0x9835
+ #define PCI_DEVICE_ID_NETMOS_9845     0x9845
+ #define PCI_DEVICE_ID_NETMOS_9855     0x9855
++#define PCI_DEVICE_ID_NETMOS_9901     0x9901
+ 
+ #define PCI_VENDOR_ID_3COM_2          0xa727
+ 
diff --git a/debian/patches/libdde_phy.patch b/debian/patches/libdde_phy.patch
new file mode 100644
index 0000000..b738512
--- /dev/null
+++ b/debian/patches/libdde_phy.patch
@@ -0,0 +1,60 @@
+--- a/libdde_linux26/contrib/include/linux/phy.h       2012-04-15 20:31:32.000000000 +0000
++++ b/libdde_linux26/contrib/include/linux/phy.h       2012-03-17 10:14:52.000000000 +0000
+@@ -79,7 +79,7 @@
+  * Need to be a little smaller than phydev->dev.bus_id to leave room
+  * for the ":%02x"
+  */
+-#define MII_BUS_ID_SIZE       (BUS_ID_SIZE - 3)
++#define MII_BUS_ID_SIZE       (20 - 3)
+ 
+ /*
+  * The Bus class for PHYs.  Devices which provide access to
+@@ -315,8 +315,7 @@
+ 
+       /* Interrupt and Polling infrastructure */
+       struct work_struct phy_queue;
+-      struct work_struct state_queue;
+-      struct timer_list phy_timer;
++      struct delayed_work state_queue;
+       atomic_t irq_disable;
+ 
+       struct mutex lock;
+@@ -389,6 +388,12 @@
+       /* Enables or disables interrupts */
+       int (*config_intr)(struct phy_device *phydev);
+ 
++      /*
++       * Checks if the PHY generated an interrupt.
++       * For multi-PHY devices with shared PHY interrupt pin
++       */
++      int (*did_interrupt)(struct phy_device *phydev);
++
+       /* Clears up any memory if needed */
+       void (*remove)(struct phy_device *phydev);
+ 
+@@ -402,7 +407,7 @@
+ /* A Structure for boards to register fixups with the PHY Lib */
+ struct phy_fixup {
+       struct list_head list;
+-      char bus_id[BUS_ID_SIZE];
++      char bus_id[20];
+       u32 phy_uid;
+       u32 phy_uid_mask;
+       int (*run)(struct phy_device *phydev);
+@@ -439,10 +444,16 @@
+ 
+ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
+ struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
++int phy_device_register(struct phy_device *phy);
+ int phy_clear_interrupt(struct phy_device *phydev);
+ int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
++int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
++              u32 flags, phy_interface_t interface);
+ struct phy_device * phy_attach(struct net_device *dev,
+               const char *bus_id, u32 flags, phy_interface_t interface);
++int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
++              void (*handler)(struct net_device *), u32 flags,
++              phy_interface_t interface);
+ struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
+               void (*handler)(struct net_device *), u32 flags,
+               phy_interface_t interface);
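
Of the additions here, did_interrupt() is the one driver authors notice: on
boards where several PHYs share one interrupt pin, the PHY library calls it to
ask each device whether it actually raised the interrupt. A sketch of an
implementation (hypothetical PHY driver; the MY_INTR_* register and bit are
invented):

    #define MY_INTR_STATUS   0x1a    /* invented vendor status register */
    #define MY_INTR_PENDING  0x8000  /* invented "interrupt pending" bit */

    static int my_phy_did_interrupt(struct phy_device *phydev)
    {
            int status = phy_read(phydev, MY_INTR_STATUS);

            /* Nonzero means "this PHY raised the interrupt". */
            return status < 0 ? 0 : !!(status & MY_INTR_PENDING);
    }
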
diff --git a/debian/patches/libdde_pr_cont.patch b/debian/patches/libdde_pr_cont.patch
new file mode 100644
index 0000000..74b88a2
--- /dev/null
+++ b/debian/patches/libdde_pr_cont.patch
@@ -0,0 +1,26 @@
+diff --git a/libdde_linux26/include/linux/kernel.h b/libdde_linux26/include/linux/kernel.h
+index 573ed07..6354939 100644
+--- a/libdde_linux26/include/linux/kernel.h
++++ b/libdde_linux26/include/linux/kernel.h
+@@ -363,6 +363,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
+         printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ #define pr_info(fmt, ...) \
+         printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
++#define pr_cont(fmt, ...) \
++        printk(KERN_CONT fmt, ##__VA_ARGS__)
+ 
+ #if defined(DEBUG)
+ #ifndef DDE_LINUX
+diff --git a/libdde_linux26/contrib/include/linux/kernel.h b/libdde_linux26/contrib/include/linux/kernel.h
+index 7fa3718..0bded10 100644
+--- a/libdde_linux26/contrib/include/linux/kernel.h
++++ b/libdde_linux26/contrib/include/linux/kernel.h
+@@ -353,6 +353,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
+         printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+ #define pr_info(fmt, ...) \
+         printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
++#define pr_cont(fmt, ...) \
++        printk(KERN_CONT fmt, ##__VA_ARGS__)
+ 
+ /* If you are writing a driver, please use dev_dbg instead */
+ #if defined(DEBUG)
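
pr_cont() wraps printk(KERN_CONT ...), which appends to the previous log line
instead of opening a new record; unlike its siblings above it deliberately
skips pr_fmt(), since a continuation must not repeat the message prefix.
Typical use (illustrative, not from the patch):

    pr_info("link status:");
    pr_cont(" up, 100 Mb/s\n");  /* logs a single "link status: up, 100 Mb/s" line */
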
diff --git a/debian/patches/libdde_rcu.patch b/debian/patches/libdde_rcu.patch
new file mode 100644
index 0000000..25f8a39
--- /dev/null
+++ b/debian/patches/libdde_rcu.patch
@@ -0,0 +1,816 @@
+--- /dev/null  2011-08-03 18:03:30.000000000 +0000
++++ b/libdde_linux26/contrib/kernel/rcuclassic.c       2012-04-15 23:40:54.000000000 +0000
+@@ -0,0 +1,788 @@
++/*
++ * Read-Copy Update mechanism for mutual exclusion
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
++ *
++ * Copyright IBM Corporation, 2001
++ *
++ * Authors: Dipankar Sarma <address@hidden>
++ *        Manfred Spraul <address@hidden>
++ *
++ * Based on the original work by Paul McKenney <address@hidden>
++ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
++ * Papers:
++ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
++ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
++ *
++ * For detailed explanation of Read-Copy Update mechanism see -
++ *            Documentation/RCU
++ *
++ */
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <linux/smp.h>
++#include <linux/rcupdate.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <asm/atomic.h>
++#include <linux/bitops.h>
++#include <linux/module.h>
++#include <linux/completion.h>
++#include <linux/moduleparam.h>
++#include <linux/percpu.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/mutex.h>
++#include <linux/time.h>
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++static struct lock_class_key rcu_lock_key;
++struct lockdep_map rcu_lock_map =
++      STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
++EXPORT_SYMBOL_GPL(rcu_lock_map);
++#endif
++
++
++/* Definition for rcupdate control block. */
++static struct rcu_ctrlblk rcu_ctrlblk = {
++      .cur = -300,
++      .completed = -300,
++      .pending = -300,
++      .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
++      .cpumask = CPU_BITS_NONE,
++};
++static struct rcu_ctrlblk rcu_bh_ctrlblk = {
++      .cur = -300,
++      .completed = -300,
++      .pending = -300,
++      .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
++      .cpumask = CPU_BITS_NONE,
++};
++
++DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
++DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
++
++static int blimit = 10;
++static int qhimark = 10000;
++static int qlowmark = 100;
++
++#ifdef CONFIG_SMP
++static void force_quiescent_state(struct rcu_data *rdp,
++                      struct rcu_ctrlblk *rcp)
++{
++      int cpu;
++      unsigned long flags;
++
++      set_need_resched();
++      spin_lock_irqsave(&rcp->lock, flags);
++      if (unlikely(!rcp->signaled)) {
++              rcp->signaled = 1;
++              /*
++               * Don't send IPI to itself. With irqs disabled,
++               * rdp->cpu is the current cpu.
++               *
++               * cpu_online_mask is updated by the _cpu_down()
++               * using __stop_machine(). Since we're in irqs disabled
++               * section, __stop_machine() is not exectuting, hence
++               * the cpu_online_mask is stable.
++               *
++               * However,  a cpu might have been offlined _just_ before
++               * we disabled irqs while entering here.
++               * And rcu subsystem might not yet have handled the CPU_DEAD
++               * notification, leading to the offlined cpu's bit
++               * being set in the rcp->cpumask.
++               *
++               * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
++               * sending smp_reschedule() to an offlined CPU.
++               */
++              for_each_cpu_and(cpu,
++                                to_cpumask(rcp->cpumask), cpu_online_mask) {
++                      if (cpu != rdp->cpu)
++                              smp_send_reschedule(cpu);
++              }
++      }
++      spin_unlock_irqrestore(&rcp->lock, flags);
++}
++#else
++static inline void force_quiescent_state(struct rcu_data *rdp,
++                      struct rcu_ctrlblk *rcp)
++{
++      set_need_resched();
++}
++#endif
++
++static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
++              struct rcu_data *rdp)
++{
++      long batch;
++
++      head->next = NULL;
++      smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
++
++      /*
++       * Determine the batch number of this callback.
++       *
++       * Using ACCESS_ONCE to avoid the following error when gcc eliminates
++       * the local variable "batch" and emits code like this:
++       *      1) rdp->batch = rcp->cur + 1 # gets old value
++       *      ......
++       *      2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
++       * then [*nxttail[0], *nxttail[1]) may contain callbacks
++       * whose batch# = rdp->batch; see the comment of struct rcu_data.
++       */
++      batch = ACCESS_ONCE(rcp->cur) + 1;
++
++      if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
++              /* process callbacks */
++              rdp->nxttail[0] = rdp->nxttail[1];
++              rdp->nxttail[1] = rdp->nxttail[2];
++              if (rcu_batch_after(batch - 1, rdp->batch))
++                      rdp->nxttail[0] = rdp->nxttail[2];
++      }
++
++      rdp->batch = batch;
++      *rdp->nxttail[2] = head;
++      rdp->nxttail[2] = &head->next;
++
++      if (unlikely(++rdp->qlen > qhimark)) {
++              rdp->blimit = INT_MAX;
++              force_quiescent_state(rdp, &rcu_ctrlblk);
++      }
++}
++
++#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
++
++static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
++{
++      rcp->gp_start = jiffies;
++      rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
++}
++
++static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
++{
++      int cpu;
++      long delta;
++      unsigned long flags;
++
++      /* Only let one CPU complain about others per time interval. */
++
++      spin_lock_irqsave(&rcp->lock, flags);
++      delta = jiffies - rcp->jiffies_stall;
++      if (delta < 2 || rcp->cur != rcp->completed) {
++              spin_unlock_irqrestore(&rcp->lock, flags);
++              return;
++      }
++      rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
++      spin_unlock_irqrestore(&rcp->lock, flags);
++
++      /* OK, time to rat on our buddy... */
++
++      printk(KERN_ERR "INFO: RCU detected CPU stalls:");
++      for_each_possible_cpu(cpu) {
++              if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
++                      printk(" %d", cpu);
++      }
++      printk(" (detected by %d, t=%ld jiffies)\n",
++             smp_processor_id(), (long)(jiffies - rcp->gp_start));
++}
++
++static void print_cpu_stall(struct rcu_ctrlblk *rcp)
++{
++      unsigned long flags;
++
++      printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
++                      smp_processor_id(), jiffies,
++                      jiffies - rcp->gp_start);
++      dump_stack();
++      spin_lock_irqsave(&rcp->lock, flags);
++      if ((long)(jiffies - rcp->jiffies_stall) >= 0)
++              rcp->jiffies_stall =
++                      jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
++      spin_unlock_irqrestore(&rcp->lock, flags);
++      set_need_resched();  /* kick ourselves to get things going. */
++}
++
++static void check_cpu_stall(struct rcu_ctrlblk *rcp)
++{
++      long delta;
++
++      delta = jiffies - rcp->jiffies_stall;
++      if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
++              delta >= 0) {
++
++              /* We haven't checked in, so go dump stack. */
++              print_cpu_stall(rcp);
++
++      } else if (rcp->cur != rcp->completed && delta >= 2) {
++
++              /* They had two seconds to dump stack, so complain. */
++              print_other_cpu_stall(rcp);
++      }
++}
++
++#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
++
++static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
++{
++}
++
++static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
++{
++}
++
++#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
++
++/**
++ * call_rcu - Queue an RCU callback for invocation after a grace period.
++ * @head: structure to be used for queueing the RCU updates.
++ * @func: actual update function to be invoked after the grace period
++ *
++ * The update function will be invoked some time after a full grace
++ * period elapses, in other words after all currently executing RCU
++ * read-side critical sections have completed.  RCU read-side critical
++ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
++ * and may be nested.
++ */
++void call_rcu(struct rcu_head *head,
++                              void (*func)(struct rcu_head *rcu))
++{
++      unsigned long flags;
++
++      head->func = func;
++      local_irq_save(flags);
++      __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
++      local_irq_restore(flags);
++}
++EXPORT_SYMBOL_GPL(call_rcu);
++
++/**
++ * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
++ * @head: structure to be used for queueing the RCU updates.
++ * @func: actual update function to be invoked after the grace period
++ *
++ * The update function will be invoked some time after a full grace
++ * period elapses, in other words after all currently executing RCU
++ * read-side critical sections have completed. call_rcu_bh() assumes
++ * that the read-side critical sections end on completion of a softirq
++ * handler. This means that read-side critical sections in process
++ * context must not be interrupted by softirqs. This interface is to be
++ * used when most of the read-side critical sections are in softirq context.
++ * RCU read-side critical sections are delimited by rcu_read_lock() and
++ * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
++ * and rcu_read_unlock_bh() if in process context. These may be nested.
++ */
++void call_rcu_bh(struct rcu_head *head,
++                              void (*func)(struct rcu_head *rcu))
++{
++      unsigned long flags;
++
++      head->func = func;
++      local_irq_save(flags);
++      __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
++      local_irq_restore(flags);
++}
++EXPORT_SYMBOL_GPL(call_rcu_bh);
++
++/*
++ * Return the number of RCU batches processed thus far.  Useful
++ * for debug and statistics.
++ */
++long rcu_batches_completed(void)
++{
++      return rcu_ctrlblk.completed;
++}
++EXPORT_SYMBOL_GPL(rcu_batches_completed);
++
++/*
++ * Return the number of RCU bh batches processed thus far.  Useful
++ * for debug and statistics.
++ */
++long rcu_batches_completed_bh(void)
++{
++      return rcu_bh_ctrlblk.completed;
++}
++EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
++
++/* Raises the softirq for processing rcu_callbacks. */
++static inline void raise_rcu_softirq(void)
++{
++      raise_softirq(RCU_SOFTIRQ);
++}
++
++/*
++ * Invoke the completed RCU callbacks. They are expected to be in
++ * a per-cpu list.
++ */
++static void rcu_do_batch(struct rcu_data *rdp)
++{
++      unsigned long flags;
++      struct rcu_head *next, *list;
++      int count = 0;
++
++      list = rdp->donelist;
++      while (list) {
++              next = list->next;
++              prefetch(next);
++              list->func(list);
++              list = next;
++              if (++count >= rdp->blimit)
++                      break;
++      }
++      rdp->donelist = list;
++
++      local_irq_save(flags);
++      rdp->qlen -= count;
++      local_irq_restore(flags);
++      if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
++              rdp->blimit = blimit;
++
++      if (!rdp->donelist)
++              rdp->donetail = &rdp->donelist;
++      else
++              raise_rcu_softirq();
++}
++
++/*
++ * Grace period handling:
++ * The grace period handling consists of two steps:
++ * - A new grace period is started.
++ *   This is done by rcu_start_batch. The start is not broadcast to
++ *   all cpus; they must pick this up by comparing rcp->cur with
++ *   rdp->quiescbatch. All cpus are recorded in the
++ *   rcu_ctrlblk.cpumask bitmap.
++ * - All cpus must go through a quiescent state.
++ *   Since the start of the grace period is not broadcast, at least two
++ *   calls to rcu_check_quiescent_state are required:
++ *   The first call just notices that a new grace period is running. The
++ *   following calls check if there was a quiescent state since the beginning
++ *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
++ *   the bitmap is empty, then the grace period is completed.
++ *   rcu_check_quiescent_state calls rcu_start_batch(0) to start the next
++ *   grace period (if necessary).
++ */
++
++/*
++ * Register a new batch of callbacks, and start it up if there is currently no
++ * active batch and the batch to be registered has not already occurred.
++ * Caller must hold rcu_ctrlblk.lock.
++ */
++static void rcu_start_batch(struct rcu_ctrlblk *rcp)
++{
++      if (rcp->cur != rcp->pending &&
++                      rcp->completed == rcp->cur) {
++              rcp->cur++;
++              record_gp_stall_check_time(rcp);
++
++              /*
++               * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
++               * barrier. Otherwise it can cause tickless idle CPUs to be
++               * included in rcp->cpumask, which will extend grace periods
++               * unnecessarily.
++               */
++              smp_mb();
++              cpumask_andnot(to_cpumask(rcp->cpumask),
++                             cpu_online_mask, nohz_cpu_mask);
++
++              rcp->signaled = 0;
++      }
++}
++
++/*
++ * cpu went through a quiescent state since the beginning of the grace period.
++ * Clear it from the cpu mask and complete the grace period if it was the last
++ * cpu. Start another grace period if someone has further entries pending.
++ */
++static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
++{
++      cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
++      if (cpumask_empty(to_cpumask(rcp->cpumask))) {
++              /* batch completed ! */
++              rcp->completed = rcp->cur;
++              rcu_start_batch(rcp);
++      }
++}
++
++/*
++ * Check if the cpu has gone through a quiescent state (say context
++ * switch). If so, and if it hasn't already done so in this RCU
++ * quiescent cycle, then indicate that it has done so.
++ */
++static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
++                                      struct rcu_data *rdp)
++{
++      unsigned long flags;
++
++      if (rdp->quiescbatch != rcp->cur) {
++              /* start new grace period: */
++              rdp->qs_pending = 1;
++              rdp->passed_quiesc = 0;
++              rdp->quiescbatch = rcp->cur;
++              return;
++      }
++
++      /* Grace period already completed for this cpu?
++       * qs_pending is checked instead of the actual bitmap to avoid
++       * cacheline thrashing.
++       */
++      if (!rdp->qs_pending)
++              return;
++
++      /*
++       * Was there a quiescent state since the beginning of the grace
++       * period? If no, then exit and wait for the next call.
++       */
++      if (!rdp->passed_quiesc)
++              return;
++      rdp->qs_pending = 0;
++
++      spin_lock_irqsave(&rcp->lock, flags);
++      /*
++       * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
++       * during cpu startup. Ignore the quiescent state.
++       */
++      if (likely(rdp->quiescbatch == rcp->cur))
++              cpu_quiet(rdp->cpu, rcp);
++
++      spin_unlock_irqrestore(&rcp->lock, flags);
++}
++
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
++ * the locking requirements: the list it's pulling from has to belong to a cpu
++ * which is dead and hence not processing interrupts.
++ */
++static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
++                              struct rcu_head **tail, long batch)
++{
++      unsigned long flags;
++
++      if (list) {
++              local_irq_save(flags);
++              this_rdp->batch = batch;
++              *this_rdp->nxttail[2] = list;
++              this_rdp->nxttail[2] = tail;
++              local_irq_restore(flags);
++      }
++}
++
++static void __rcu_offline_cpu(struct rcu_data *this_rdp,
++                              struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
++{
++      unsigned long flags;
++
++      /*
++       * if the cpu going offline owns the grace period
++       * we can block indefinitely waiting for it, so flush
++       * it here
++       */
++      spin_lock_irqsave(&rcp->lock, flags);
++      if (rcp->cur != rcp->completed)
++              cpu_quiet(rdp->cpu, rcp);
++      rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
++      rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
++      spin_unlock(&rcp->lock);
++
++      this_rdp->qlen += rdp->qlen;
++      local_irq_restore(flags);
++}
++
++static void rcu_offline_cpu(int cpu)
++{
++      struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
++      struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
++
++      __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
++                                      &per_cpu(rcu_data, cpu));
++      __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
++                                      &per_cpu(rcu_bh_data, cpu));
++      put_cpu_var(rcu_data);
++      put_cpu_var(rcu_bh_data);
++}
++
++#else
++
++static void rcu_offline_cpu(int cpu)
++{
++}
++
++#endif
++
++/*
++ * This does the RCU processing work from softirq context.
++ */
++static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
++                                      struct rcu_data *rdp)
++{
++      unsigned long flags;
++      long completed_snap;
++
++      if (rdp->nxtlist) {
++              local_irq_save(flags);
++              completed_snap = ACCESS_ONCE(rcp->completed);
++
++              /*
++               * move the other grace-period-completed entries to
++               * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
++               */
++              if (!rcu_batch_before(completed_snap, rdp->batch))
++                      rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
++              else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
++                      rdp->nxttail[0] = rdp->nxttail[1];
++
++              /*
++               * the grace period for entries in
++               * [rdp->nxtlist, *rdp->nxttail[0]) has completed;
++               * move these entries to the donelist
++               */
++              if (rdp->nxttail[0] != &rdp->nxtlist) {
++                      *rdp->donetail = rdp->nxtlist;
++                      rdp->donetail = rdp->nxttail[0];
++                      rdp->nxtlist = *rdp->nxttail[0];
++                      *rdp->donetail = NULL;
++
++                      if (rdp->nxttail[1] == rdp->nxttail[0])
++                              rdp->nxttail[1] = &rdp->nxtlist;
++                      if (rdp->nxttail[2] == rdp->nxttail[0])
++                              rdp->nxttail[2] = &rdp->nxtlist;
++                      rdp->nxttail[0] = &rdp->nxtlist;
++              }
++
++              local_irq_restore(flags);
++
++              if (rcu_batch_after(rdp->batch, rcp->pending)) {
++                      unsigned long flags2;
++
++                      /* and start it/schedule start if it's a new batch */
++                      spin_lock_irqsave(&rcp->lock, flags2);
++                      if (rcu_batch_after(rdp->batch, rcp->pending)) {
++                              rcp->pending = rdp->batch;
++                              rcu_start_batch(rcp);
++                      }
++                      spin_unlock_irqrestore(&rcp->lock, flags2);
++              }
++      }
++
++      rcu_check_quiescent_state(rcp, rdp);
++      if (rdp->donelist)
++              rcu_do_batch(rdp);
++}
++
++static void rcu_process_callbacks(struct softirq_action *unused)
++{
++      /*
++       * Memory references from any prior RCU read-side critical sections
++       * executed by the interrupted code must be seen before any RCU
++       * grace-period manipulations below.
++       */
++
++      smp_mb(); /* See above block comment. */
++
++      __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
++      __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
++
++      /*
++       * Memory references from any later RCU read-side critical sections
++       * executed by the interrupted code must be seen after any RCU
++       * grace-period manipulations above.
++       */
++
++      smp_mb(); /* See above block comment. */
++}
++
++static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
++{
++      /* Check for CPU stalls, if enabled. */
++      check_cpu_stall(rcp);
++
++      if (rdp->nxtlist) {
++              long completed_snap = ACCESS_ONCE(rcp->completed);
++
++              /*
++               * This cpu has pending rcu entries and the grace period
++               * for them has completed.
++               */
++              if (!rcu_batch_before(completed_snap, rdp->batch))
++                      return 1;
++              if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
++                              rdp->nxttail[0] != rdp->nxttail[1])
++                      return 1;
++              if (rdp->nxttail[0] != &rdp->nxtlist)
++                      return 1;
++
++              /*
++               * This cpu has pending rcu entries and the new batch
++               * for them hasn't been started nor scheduled to start
++               */
++              if (rcu_batch_after(rdp->batch, rcp->pending))
++                      return 1;
++      }
++
++      /* This cpu has finished callbacks to invoke */
++      if (rdp->donelist)
++              return 1;
++
++      /* The rcu core waits for a quiescent state from the cpu */
++      if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
++              return 1;
++
++      /* nothing to do */
++      return 0;
++}
++
++/*
++ * Check to see if there is any immediate RCU-related work to be done
++ * by the current CPU, returning 1 if so.  This function is part of the
++ * RCU implementation; it is -not- an exported member of the RCU API.
++ */
++int rcu_pending(int cpu)
++{
++      return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
++              __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
++}
++
++/*
++ * Check to see if any future RCU-related work will need to be done
++ * by the current CPU, even if none need be done immediately, returning
++ * 1 if so.  This function is part of the RCU implementation; it is -not-
++ * an exported member of the RCU API.
++ */
++int rcu_needs_cpu(int cpu)
++{
++      struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
++      struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
++
++      return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
++}
++
++/*
++ * Top-level function driving RCU grace-period detection, normally
++ * invoked from the scheduler-clock interrupt.  This function simply
++ * increments counters that are read only from softirq by this same
++ * CPU, so there are no memory barriers required.
++ */
++void rcu_check_callbacks(int cpu, int user)
++{
++      if (user ||
++          (idle_cpu(cpu) && rcu_scheduler_active &&
++           !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
++
++              /*
++               * Get here if this CPU took its interrupt from user
++               * mode or from the idle loop, and if this is not a
++               * nested interrupt.  In this case, the CPU is in
++               * a quiescent state, so count it.
++               *
++               * Also do a memory barrier.  This is needed to handle
++               * the case where writes from a preempt-disable section
++               * of code get reordered into schedule() by this CPU's
++               * write buffer.  The memory barrier makes sure that
++               * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
++               * by other CPUs to happen after any such write.
++               */
++
++              smp_mb();  /* See above block comment. */
++              rcu_qsctr_inc(cpu);
++              rcu_bh_qsctr_inc(cpu);
++
++      } else if (!in_softirq()) {
++
++              /*
++               * Get here if this CPU did not take its interrupt from
++               * softirq, in other words, if it is not interrupting
++               * a rcu_bh read-side critical section.  This is therefore
++               * a quiescent state for rcu_bh, so count it.  The memory barrier
++               * is needed for the same reason as is the above one.
++               */
++
++              smp_mb();  /* See above block comment. */
++              rcu_bh_qsctr_inc(cpu);
++      }
++      raise_rcu_softirq();
++}
++
++static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
++                                              struct rcu_data *rdp)
++{
++      unsigned long flags;
++
++      spin_lock_irqsave(&rcp->lock, flags);
++      memset(rdp, 0, sizeof(*rdp));
++      rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
++      rdp->donetail = &rdp->donelist;
++      rdp->quiescbatch = rcp->completed;
++      rdp->qs_pending = 0;
++      rdp->cpu = cpu;
++      rdp->blimit = blimit;
++      spin_unlock_irqrestore(&rcp->lock, flags);
++}
++
++static void __cpuinit rcu_online_cpu(int cpu)
++{
++      struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
++      struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
++
++      rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
++      rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
++      open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
++}
++
++static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
++                              unsigned long action, void *hcpu)
++{
++      long cpu = (long)hcpu;
++
++      switch (action) {
++      case CPU_UP_PREPARE:
++      case CPU_UP_PREPARE_FROZEN:
++              rcu_online_cpu(cpu);
++              break;
++      case CPU_DEAD:
++      case CPU_DEAD_FROZEN:
++              rcu_offline_cpu(cpu);
++              break;
++      default:
++              break;
++      }
++      return NOTIFY_OK;
++}
++
++static struct notifier_block __cpuinitdata rcu_nb = {
++      .notifier_call  = rcu_cpu_notify,
++};
++
++/*
++ * Initializes rcu mechanism.  Assumed to be called early.
++ * That is, before the local timer (SMP) or jiffy timer (uniproc) is set up.
++ * Note that rcu_qsctr and friends are implicitly
++ * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
++ */
++void __init __rcu_init(void)
++{
++#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
++      printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
++#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
++      rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
++                      (void *)(long)smp_processor_id());
++      /* Register notifier for non-boot CPUs */
++      register_cpu_notifier(&rcu_nb);
++}
++
++module_param(blimit, int, 0);
++module_param(qhimark, int, 0);
++module_param(qlowmark, int, 0);
+diff --git a/libdde_linux26/contrib/kernel/rcupdate.c b/libdde_linux26/contrib/kernel/rcupdate.c
+index cae8a05..c6bfa1a 100644
+--- a/libdde_linux26/contrib/kernel/rcupdate.c
++++ b/libdde_linux26/contrib/kernel/rcupdate.c
+@@ -180,6 +180,7 @@ void __init rcu_init(void)
+ {
+       __rcu_init();
+ }
++core_initcall(rcu_init);
+ 
+ void rcu_scheduler_starting(void)
+ {
+diff --git a/libdde_linux26/lib/src/Makefile b/libdde_linux26/lib/src/Makefile
+index 358196b..450c4e6 100644
+--- a/libdde_linux26/lib/src/Makefile
++++ b/libdde_linux26/lib/src/Makefile
+@@ -89,6 +89,8 @@ SRC_C_libdde_linux26.o.a += \
+                               kernel/kthread.c \
+                               kernel/mutex.c \
+                               kernel/notifier.c \
++                              kernel/rcupdate.c \
++                              kernel/rcuclassic.c \
+                               kernel/resource.c \
+                               kernel/rwsem.c \
+                               kernel/sched.c \
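
For illustration, the call_rcu() interface provided by the rcuclassic code
above is typically used to defer freeing a shared object until all readers
are done. A minimal sketch; struct foo and its helpers are illustrative
names, not part of the patch:

    struct foo {
            int data;
            struct rcu_head rcu;            /* embedded callback handle */
    };

    static void foo_free_rcu(struct rcu_head *head)
    {
            /* recover the enclosing object from its rcu_head */
            struct foo *f = container_of(head, struct foo, rcu);

            kfree(f);
    }

    static void foo_release(struct foo *f)
    {
            /*
             * Readers may still be traversing f under rcu_read_lock();
             * free it only after a full grace period has elapsed.
             */
            call_rcu(&f->rcu, foo_free_rcu);
    }
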
diff --git a/debian/patches/libdde_rculist.patch b/debian/patches/libdde_rculist.patch
new file mode 100644
index 0000000..19e512a
--- /dev/null
+++ b/debian/patches/libdde_rculist.patch
@@ -0,0 +1,12 @@
+diff --git a/libdde_linux26/contrib/include/linux/netdevice.h b/libdde_linux26/contrib/include/linux/netdevice.h
+index 6593667..bb4fca1 100644
+--- a/libdde_linux26/contrib/include/linux/netdevice.h
++++ b/libdde_linux26/contrib/include/linux/netdevice.h
+@@ -37,6 +37,7 @@
+ #include <asm/byteorder.h>
+ 
+ #include <linux/device.h>
++#include <linux/rculist.h>
+ #include <linux/percpu.h>
+ #include <linux/dmaengine.h>
+ #include <linux/workqueue.h>
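
The include is needed because the address-list code walks the new
netdev_hw_addr lists under RCU. A hedged reader-side sketch; show_addrs()
is an illustrative name, and the list is assumed to be updated under rtnl
as in dev.c:

    static void show_addrs(struct list_head *list)
    {
            struct netdev_hw_addr *ha;

            /* lockless traversal; writers use list_add_tail_rcu() */
            rcu_read_lock();
            list_for_each_entry_rcu(ha, list, list)
                    printk(KERN_DEBUG "hw addr %pM\n", ha->addr);
            rcu_read_unlock();
    }
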
diff --git a/debian/patches/libdde_rx_queue.patch b/debian/patches/libdde_rx_queue.patch
new file mode 100644
index 0000000..0d96264
--- /dev/null
+++ b/debian/patches/libdde_rx_queue.patch
@@ -0,0 +1,70 @@
+commit d5a9e24afb4ab38110ebb777588ea0bd0eacbd0a
+Author: David S. Miller <address@hidden>
+Date:   Tue Jan 27 16:22:11 2009 -0800
+
+    net: Allow RX queue selection to seed TX queue hashing.
+    
+    The idea is that drivers which implement multiqueue RX
+    pre-seed the SKB by recording the RX queue selected by
+    the hardware.
+    
+    If such a seed is found on TX, we'll use that to select
+    the outgoing TX queue.
+    
+    This helps get more consistent load balancing on router
+    and firewall loads.
+    
+    Signed-off-by: David S. Miller <address@hidden>
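
A multiqueue driver opts in by tagging each received skb with the hardware
queue it arrived on; the dev.c hunk below then reuses that seed when
hashing for a TX queue. A hypothetical RX-path sketch; foo_ring and its
queue_index field are illustrative, not part of the patch:

    static void foo_receive(struct foo_ring *ring, struct sk_buff *skb)
    {
            /* remember which RX queue the hardware chose, so TX queue
             * selection can be kept consistent with it */
            skb_record_rx_queue(skb, ring->queue_index);
            netif_receive_skb(skb);
    }
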
+
+Index: hurd-debian/libdde_linux26/contrib/include/linux/skbuff.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/skbuff.h     2012-04-16 00:34:56.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/skbuff.h  2012-04-16 00:35:11.000000000 +0000
+@@ -1903,6 +1903,21 @@
+       to->queue_mapping = from->queue_mapping;
+ }
+ 
++static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
++{
++      skb->queue_mapping = rx_queue + 1;
++}
++
++static inline u16 skb_get_rx_queue(struct sk_buff *skb)
++{
++      return skb->queue_mapping - 1;
++}
++
++static inline bool skb_rx_queue_recorded(struct sk_buff *skb)
++{
++      return (skb->queue_mapping != 0);
++}
++
+ #ifdef CONFIG_XFRM
+ static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+ {
+Index: hurd-debian/libdde_linux26/lib/src/net/core/dev.c
+===================================================================
+--- hurd-debian.orig/libdde_linux26/lib/src/net/core/dev.c     2012-04-16 00:34:51.000000000 +0000
++++ hurd-debian/libdde_linux26/lib/src/net/core/dev.c  2012-04-16 00:35:11.000000000 +0000
+@@ -1731,6 +1731,13 @@
+               simple_tx_hashrnd_initialized = 1;
+       }
+ 
++      if (skb_rx_queue_recorded(skb)) {
++              u32 val = skb_get_rx_queue(skb);
++
++              hash = jhash_1word(val, simple_tx_hashrnd);
++              goto out;
++      }
++
+       switch (skb->protocol) {
+       case htons(ETH_P_IP):
+               if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
+@@ -1768,6 +1775,7 @@
+ 
+       hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
+ 
++out:
+       return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+ }
+ 
diff --git a/debian/patches/libdde_trans_start.patch b/debian/patches/libdde_trans_start.patch
new file mode 100644
index 0000000..07acc6d
--- /dev/null
+++ b/debian/patches/libdde_trans_start.patch
@@ -0,0 +1,138 @@
+commit 9d21493b4beb8f918ba248032fefa393074a5e2b
+Author: Eric Dumazet <address@hidden>
+Date:   Sun May 17 20:55:16 2009 -0700
+
+    net: tx scalability works : trans_start
+    
+    struct net_device trans_start field is a hot spot on SMP and high performance
+    devices, particularly multiqueue ones, because every transmitter dirties
+    it. Its main use is tx watchdog and bonding alive checks.
+    
+    But as most devices don't use NETIF_F_LLTX, we have to lock
+    a netdev_queue before calling their ndo_start_xmit(). So it makes
+    sense to move trans_start from net_device to netdev_queue. Its update
+    will occur on an already present (and in exclusive state) cache line, for
+    free.
+    
+    We can do this transition smoothly. An old driver continues to
+    update dev->trans_start, while an updated one updates txq->trans_start.
+    
+    Further patches could also put tx_bytes/tx_packets counters in
+    netdev_queue to avoid dirtying dev->stats (vlan device comes to mind)
+    
+    Signed-off-by: Eric Dumazet <address@hidden>
+    Signed-off-by: David S. Miller <address@hidden>
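
An updated driver stamps the per-queue field from its transmit routine,
while dev_watchdog() below falls back to dev->trans_start for unconverted
drivers. A hedged sketch; foo_start_xmit() is an illustrative driver
method, not part of the patch:

    static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            struct netdev_queue *txq =
                    netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

            /* ... hand the frame to the hardware here ... */

            txq->trans_start = jiffies;     /* not dev->trans_start */
            return NETDEV_TX_OK;
    }
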
+
+Index: hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/netdevice.h  2012-04-16 00:34:54.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h       2012-04-16 00:35:04.000000000 +0000
+@@ -462,6 +462,10 @@
+       spinlock_t              _xmit_lock;
+       int                     xmit_lock_owner;
+       struct Qdisc            *qdisc_sleeping;
++      /*
++       * please use this field instead of dev->trans_start
++       */
++      unsigned long           trans_start;
+ } ____cacheline_aligned_in_smp;
+ 
+ 
+@@ -801,6 +805,11 @@
+  * One part is mostly used on xmit path (device)
+  */
+       /* These may be needed for future network-power-down code. */
++
++      /*
++       * trans_start here is expensive for high speed devices on SMP,
++       * please use netdev_queue->trans_start instead.
++       */
+       unsigned long           trans_start;    /* Time (in jiffies) of last Tx */
+ 
+       int                     watchdog_timeo; /* used by dev_watchdog() */
+@@ -1477,6 +1486,8 @@
+       return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
+ }
+ 
++extern unsigned long dev_trans_start(struct net_device *dev);
++
+ extern void __netdev_watchdog_up(struct net_device *dev);
+ 
+ extern void netif_carrier_on(struct net_device *dev);
+Index: hurd-debian/libdde_linux26/lib/src/net/sched/sch_generic.c
+===================================================================
+--- hurd-debian.orig/libdde_linux26/lib/src/net/sched/sch_generic.c    2012-04-16 00:26:38.000000000 +0000
++++ hurd-debian/libdde_linux26/lib/src/net/sched/sch_generic.c 2012-04-16 00:35:04.000000000 +0000
+@@ -200,6 +200,21 @@
+       clear_bit(__QDISC_STATE_RUNNING, &q->state);
+ }
+ 
++unsigned long dev_trans_start(struct net_device *dev)
++{
++      unsigned long val, res = dev->trans_start;
++      unsigned int i;
++
++      for (i = 0; i < dev->num_tx_queues; i++) {
++              val = netdev_get_tx_queue(dev, i)->trans_start;
++              if (val && time_after(val, res))
++                      res = val;
++      }
++      dev->trans_start = res;
++      return res;
++}
++EXPORT_SYMBOL(dev_trans_start);
++
+ static void dev_watchdog(unsigned long arg)
+ {
+       struct net_device *dev = (struct net_device *)arg;
+@@ -209,25 +224,30 @@
+               if (netif_device_present(dev) &&
+                   netif_running(dev) &&
+                   netif_carrier_ok(dev)) {
+-                      int some_queue_stopped = 0;
++                      int some_queue_timedout = 0;
+                       unsigned int i;
++                      unsigned long trans_start;
+ 
+                       for (i = 0; i < dev->num_tx_queues; i++) {
+                               struct netdev_queue *txq;
+ 
+                               txq = netdev_get_tx_queue(dev, i);
+-                              if (netif_tx_queue_stopped(txq)) {
+-                                      some_queue_stopped = 1;
++                              /*
++                               * old device drivers set dev->trans_start
++                               */
+                              trans_start = txq->trans_start ? : dev->trans_start;
++                              if (netif_tx_queue_stopped(txq) &&
++                                  time_after(jiffies, (trans_start +
+                                                       dev->watchdog_timeo))) {
++                                      some_queue_timedout = 1;
+                                       break;
+                               }
+                       }
+ 
+-                      if (some_queue_stopped &&
+-                          time_after(jiffies, (dev->trans_start +
+-                                               dev->watchdog_timeo))) {
++                      if (some_queue_timedout) {
+                               char drivername[64];
+-                              WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
+-                                     dev->name, netdev_drivername(dev, drivername, 64));
++                              WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
++                                     dev->name, netdev_drivername(dev, drivername, 64), i);
+                               dev->netdev_ops->ndo_tx_timeout(dev);
+                       }
+                       if (!mod_timer(&dev->watchdog_timer,
+@@ -612,8 +632,10 @@
+               clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+ 
+       rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+-      if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
++      if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
++              dev_queue->trans_start = 0;
+               *need_watchdog_p = 1;
++      }
+ }
+ 
+ void dev_activate(struct net_device *dev)
diff --git a/debian/patches/libdde_ucast_list.patch b/debian/patches/libdde_ucast_list.patch
new file mode 100644
index 0000000..5abd344
--- /dev/null
+++ b/debian/patches/libdde_ucast_list.patch
@@ -0,0 +1,488 @@
+commit ccffad25b5136958d4769ed6de5e87992dd9c65c
+Author: Jiri Pirko <address@hidden>
+Date:   Fri May 22 23:22:17 2009 +0000
+
+    net: convert unicast addr list
+    
+    This patch converts the unicast address list to a standard list_head using
+    the previously introduced struct netdev_hw_addr. It also relaxes the
+    locking. The original spinlock (still used for multicast addresses) is not
+    needed and is no longer used to protect this list. All
+    reading and writing takes place under rtnl (with no changes).
+    
+    I also removed the possibility of specifying the length of the address
+    while adding or deleting a unicast address. It's always dev->addr_len.
+    
+    The conversion especially touched the e1000 and ixgbe code, where the
+    change is not so trivial.
+    
+    Signed-off-by: Jiri Pirko <address@hidden>
+    
+     drivers/net/bnx2.c               |   13 +--
+     drivers/net/e1000/e1000_main.c   |   24 +++--
+     drivers/net/ixgbe/ixgbe_common.c |   14 ++--
+     drivers/net/ixgbe/ixgbe_common.h |    4 +-
+     drivers/net/ixgbe/ixgbe_main.c   |    6 +-
+     drivers/net/ixgbe/ixgbe_type.h   |    4 +-
+     drivers/net/macvlan.c            |   11 +-
+     drivers/net/mv643xx_eth.c        |   11 +-
+     drivers/net/niu.c                |    7 +-
+     drivers/net/virtio_net.c         |    7 +-
+     drivers/s390/net/qeth_l2_main.c  |    6 +-
+     drivers/scsi/fcoe/fcoe.c         |   16 ++--
+     include/linux/netdevice.h        |   18 ++--
+     net/8021q/vlan.c                 |    4 +-
+     net/8021q/vlan_dev.c             |   10 +-
+     net/core/dev.c                   |  195 +++++++++++++++++++++++++++-----------
+     net/dsa/slave.c                  |   10 +-
+     net/packet/af_packet.c           |    4 +-
+     18 files changed, 227 insertions(+), 137 deletions(-)
+    Signed-off-by: David S. Miller <address@hidden>
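
Callers lose the explicit length argument: the address is implicitly
dev->addr_len bytes, and rtnl replaces the address-list spinlock. A hedged
caller sketch; foo_add_secondary_addr() is an illustrative name, not part
of the patch:

    /* 'addr' must be dev->addr_len bytes long */
    static int foo_add_secondary_addr(struct net_device *dev, u8 *addr)
    {
            int err;

            rtnl_lock();
            /* the pre-patch call was dev_unicast_add(dev, addr, dev->addr_len) */
            err = dev_unicast_add(dev, addr);
            rtnl_unlock();
            return err;
    }
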
+
+Index: hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h
+===================================================================
+--- hurd-debian.orig/libdde_linux26/contrib/include/linux/netdevice.h  2012-04-16 00:34:43.000000000 +0000
++++ hurd-debian/libdde_linux26/contrib/include/linux/netdevice.h       2012-04-16 00:34:46.000000000 +0000
+@@ -215,9 +215,12 @@
+       struct list_head        list;
+       unsigned char           addr[MAX_ADDR_LEN];
+       unsigned char           type;
+-#define NETDEV_HW_ADDR_T_LAN  1
+-#define NETDEV_HW_ADDR_T_SAN  2
+-#define NETDEV_HW_ADDR_T_SLAVE        3
++#define NETDEV_HW_ADDR_T_LAN          1
++#define NETDEV_HW_ADDR_T_SAN          2
++#define NETDEV_HW_ADDR_T_SLAVE                3
++#define NETDEV_HW_ADDR_T_UNICAST      4
++      int                     refcount;
++      bool                    synced;
+       struct rcu_head         rcu_head;
+ };
+ 
+@@ -738,10 +741,11 @@
+       unsigned char           addr_len;       /* hardware address length      */
+       unsigned short          dev_id;         /* for shared network cards */
+ 
+-      spinlock_t              addr_list_lock;
+-      struct dev_addr_list    *uc_list;       /* Secondary unicast mac addresses */
++      struct list_head        uc_list;        /* Secondary unicast mac
++                                                 addresses */
+       int                     uc_count;       /* Number of installed ucasts   */
+       int                     uc_promisc;
++      spinlock_t              addr_list_lock;
+       struct dev_addr_list    *mc_list;       /* Multicast mac addresses      */
+       int                     mc_count;       /* Number of installed mcasts   */
+       unsigned int            promiscuity;
+@@ -1816,8 +1820,8 @@
+ /* Functions used for secondary unicast and multicast support */
+ extern void           dev_set_rx_mode(struct net_device *dev);
+ extern void           __dev_set_rx_mode(struct net_device *dev);
+-extern int            dev_unicast_delete(struct net_device *dev, void *addr, int alen);
+-extern int            dev_unicast_add(struct net_device *dev, void *addr, int alen);
++extern int            dev_unicast_delete(struct net_device *dev, void *addr);
++extern int            dev_unicast_add(struct net_device *dev, void *addr);
+ extern int            dev_unicast_sync(struct net_device *to, struct net_device *from);
+ extern void           dev_unicast_unsync(struct net_device *to, struct net_device *from);
+ extern int            dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
+Index: hurd-debian/libdde_linux26/lib/src/net/core/dev.c
+===================================================================
+--- hurd-debian.orig/libdde_linux26/lib/src/net/core/dev.c     2012-04-16 00:34:43.000000000 +0000
++++ hurd-debian/libdde_linux26/lib/src/net/core/dev.c  2012-04-16 00:34:46.000000000 +0000
+@@ -3399,8 +3399,9 @@
+ 
+ /* hw addresses list handling functions */
+ 
+-static int __hw_addr_add(struct list_head *list, unsigned char *addr,
+-                       int addr_len, unsigned char addr_type)
++static int __hw_addr_add(struct list_head *list, int *delta,
++                       unsigned char *addr, int addr_len,
++                       unsigned char addr_type)
+ {
+       struct netdev_hw_addr *ha;
+       int alloc_size;
+@@ -3408,6 +3409,15 @@
+       if (addr_len > MAX_ADDR_LEN)
+               return -EINVAL;
+ 
++      list_for_each_entry(ha, list, list) {
++              if (!memcmp(ha->addr, addr, addr_len) &&
++                  ha->type == addr_type) {
++                      ha->refcount++;
++                      return 0;
++              }
++      }
++
++
+       alloc_size = sizeof(*ha);
+       if (alloc_size < L1_CACHE_BYTES)
+               alloc_size = L1_CACHE_BYTES;
+@@ -3416,7 +3426,11 @@
+               return -ENOMEM;
+       memcpy(ha->addr, addr, addr_len);
+       ha->type = addr_type;
++      ha->refcount = 1;
++      ha->synced = false;
+       list_add_tail_rcu(&ha->list, list);
++      if (delta)
++              (*delta)++;
+       return 0;
+ }
+ 
+@@ -3428,29 +3442,30 @@
+       kfree(ha);
+ }
+ 
+-static int __hw_addr_del_ii(struct list_head *list, unsigned char *addr,
+-                          int addr_len, unsigned char addr_type,
+-                          int ignore_index)
++static int __hw_addr_del(struct list_head *list, int *delta,
++                       unsigned char *addr, int addr_len,
++                       unsigned char addr_type)
+ {
+       struct netdev_hw_addr *ha;
+-      int i = 0;
+ 
+       list_for_each_entry(ha, list, list) {
+-              if (i++ != ignore_index &&
+-                  !memcmp(ha->addr, addr, addr_len) &&
++              if (!memcmp(ha->addr, addr, addr_len) &&
+                   (ha->type == addr_type || !addr_type)) {
++                      if (--ha->refcount)
++                              return 0;
+                       list_del_rcu(&ha->list);
+                       call_rcu(&ha->rcu_head, ha_rcu_free);
++                      if (delta)
++                              (*delta)--;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+ }
+ 
+-static int __hw_addr_add_multiple_ii(struct list_head *to_list,
+-                                   struct list_head *from_list,
+-                                   int addr_len, unsigned char addr_type,
+-                                   int ignore_index)
++static int __hw_addr_add_multiple(struct list_head *to_list, int *to_delta,
++                                struct list_head *from_list, int addr_len,
++                                unsigned char addr_type)
+ {
+       int err;
+       struct netdev_hw_addr *ha, *ha2;
+@@ -3458,7 +3473,8 @@
+ 
+       list_for_each_entry(ha, from_list, list) {
+               type = addr_type ? addr_type : ha->type;
+-              err = __hw_addr_add(to_list, ha->addr, addr_len, type);
++              err = __hw_addr_add(to_list, to_delta, ha->addr,
++                                  addr_len, type);
+               if (err)
+                       goto unroll;
+       }
+@@ -3469,27 +3485,69 @@
+               if (ha2 == ha)
+                       break;
+               type = addr_type ? addr_type : ha2->type;
+-              __hw_addr_del_ii(to_list, ha2->addr, addr_len, type,
+-                               ignore_index);
++              __hw_addr_del(to_list, to_delta, ha2->addr,
++                            addr_len, type);
+       }
+       return err;
+ }
+ 
+-static void __hw_addr_del_multiple_ii(struct list_head *to_list,
+-                                    struct list_head *from_list,
+-                                    int addr_len, unsigned char addr_type,
+-                                    int ignore_index)
++static void __hw_addr_del_multiple(struct list_head *to_list, int *to_delta,
++                                 struct list_head *from_list, int addr_len,
++                                 unsigned char addr_type)
+ {
+       struct netdev_hw_addr *ha;
+       unsigned char type;
+ 
+       list_for_each_entry(ha, from_list, list) {
+               type = addr_type ? addr_type : ha->type;
+-              __hw_addr_del_ii(to_list, ha->addr, addr_len, addr_type,
+-                               ignore_index);
++              __hw_addr_del(to_list, to_delta, ha->addr,
++                            addr_len, addr_type);
+       }
+ }
+ 
++static int __hw_addr_sync(struct list_head *to_list, int *to_delta,
++                        struct list_head *from_list, int *from_delta,
++                        int addr_len)
++{
++      int err = 0;
++      struct netdev_hw_addr *ha, *tmp;
++
++      list_for_each_entry_safe(ha, tmp, from_list, list) {
++              if (!ha->synced) {
++                      err = __hw_addr_add(to_list, to_delta, ha->addr,
++                                          addr_len, ha->type);
++                      if (err)
++                              break;
++                      ha->synced = true;
++                      ha->refcount++;
++              } else if (ha->refcount == 1) {
++                      __hw_addr_del(to_list, to_delta, ha->addr,
++                                    addr_len, ha->type);
++                      __hw_addr_del(from_list, from_delta, ha->addr,
++                                    addr_len, ha->type);
++              }
++      }
++      return err;
++}
++
++static void __hw_addr_unsync(struct list_head *to_list, int *to_delta,
++                           struct list_head *from_list, int *from_delta,
++                           int addr_len)
++{
++      struct netdev_hw_addr *ha, *tmp;
++
++      list_for_each_entry_safe(ha, tmp, from_list, list) {
++              if (ha->synced) {
++                      __hw_addr_del(to_list, to_delta, ha->addr,
++                                    addr_len, ha->type);
++                      ha->synced = false;
++                      __hw_addr_del(from_list, from_delta, ha->addr,
++                                    addr_len, ha->type);
++              }
++      }
++}
++
++
+ static void __hw_addr_flush(struct list_head *list)
+ {
+       struct netdev_hw_addr *ha, *tmp;
+@@ -3520,7 +3578,7 @@
+ 
+       INIT_LIST_HEAD(&dev->dev_addr_list);
+       memset(addr, 0, sizeof(*addr));
+-      err = __hw_addr_add(&dev->dev_addr_list, addr, sizeof(*addr),
++      err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, sizeof(*addr),
+                           NETDEV_HW_ADDR_T_LAN);
+       if (!err) {
+               /*
+@@ -3552,7 +3610,7 @@
+ 
+       ASSERT_RTNL();
+ 
+-      err = __hw_addr_add(&dev->dev_addr_list, addr, dev->addr_len,
++      err = __hw_addr_add(&dev->dev_addr_list, NULL, addr, dev->addr_len,
+                           addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+@@ -3575,11 +3633,20 @@
+                unsigned char addr_type)
+ {
+       int err;
++      struct netdev_hw_addr *ha;
+ 
+       ASSERT_RTNL();
+ 
+-      err = __hw_addr_del_ii(&dev->dev_addr_list, addr, dev->addr_len,
+-                             addr_type, 0);
++      /*
++       * We can not remove the first address from the list because
++       * dev->dev_addr points to that.
++       */
++      ha = list_first_entry(&dev->dev_addr_list, struct netdev_hw_addr, list);
++      if (ha->addr == dev->dev_addr && ha->refcount == 1)
++              return -ENOENT;
++
++      err = __hw_addr_del(&dev->dev_addr_list, NULL, addr, dev->addr_len,
++                          addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       return err;
+@@ -3606,9 +3673,9 @@
+ 
+       if (from_dev->addr_len != to_dev->addr_len)
+               return -EINVAL;
+-      err = __hw_addr_add_multiple_ii(&to_dev->dev_addr_list,
+-                                      &from_dev->dev_addr_list,
+-                                      to_dev->addr_len, addr_type, 0);
++      err = __hw_addr_add_multiple(&to_dev->dev_addr_list, NULL,
++                                   &from_dev->dev_addr_list,
++                                   to_dev->addr_len, addr_type);
+       if (!err)
+               call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+       return err;
+@@ -3633,9 +3700,9 @@
+ 
+       if (from_dev->addr_len != to_dev->addr_len)
+               return -EINVAL;
+-      __hw_addr_del_multiple_ii(&to_dev->dev_addr_list,
+-                                &from_dev->dev_addr_list,
+-                                to_dev->addr_len, addr_type, 0);
++      __hw_addr_del_multiple(&to_dev->dev_addr_list, NULL,
++                             &from_dev->dev_addr_list,
++                             to_dev->addr_len, addr_type);
+       call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+       return 0;
+ }
+@@ -3705,24 +3772,22 @@
+  *    dev_unicast_delete      - Release secondary unicast address.
+  *    @dev: device
+  *    @addr: address to delete
+- *    @alen: length of @addr
+  *
+  *    Release reference to a secondary unicast address and remove it
+  *    from the device if the reference count drops to zero.
+  *
+  *    The caller must hold the rtnl_mutex.
+  */
+-int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
++int dev_unicast_delete(struct net_device *dev, void *addr)
+ {
+       int err;
+ 
+       ASSERT_RTNL();
+ 
+-      netif_addr_lock_bh(dev);
+-      err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
++      err = __hw_addr_del(&dev->uc_list, &dev->uc_count, addr,
++                          dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
+       if (!err)
+               __dev_set_rx_mode(dev);
+-      netif_addr_unlock_bh(dev);
+       return err;
+ }
+ EXPORT_SYMBOL(dev_unicast_delete);
+@@ -3731,24 +3796,22 @@
+  *    dev_unicast_add         - add a secondary unicast address
+  *    @dev: device
+  *    @addr: address to add
+- *    @alen: length of @addr
+  *
+  *    Add a secondary unicast address to the device or increase
+  *    the reference count if it already exists.
+  *
+  *    The caller must hold the rtnl_mutex.
+  */
+-int dev_unicast_add(struct net_device *dev, void *addr, int alen)
++int dev_unicast_add(struct net_device *dev, void *addr)
+ {
+       int err;
+ 
+       ASSERT_RTNL();
+ 
+-      netif_addr_lock_bh(dev);
+-      err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
++      err = __hw_addr_add(&dev->uc_list, &dev->uc_count, addr,
++                          dev->addr_len, NETDEV_HW_ADDR_T_UNICAST);
+       if (!err)
+               __dev_set_rx_mode(dev);
+-      netif_addr_unlock_bh(dev);
+       return err;
+ }
+ EXPORT_SYMBOL(dev_unicast_add);
+@@ -3805,8 +3868,7 @@
+  *    @from: source device
+  *
+  *    Add newly added addresses to the destination device and release
+- *    addresses that have no users left. The source device must be
+- *    locked by netif_addr_lock_bh.
++ *    addresses that have no users left.
+  *
+  *    This function is intended to be called from the dev->set_rx_mode
+  *    function of layered software devices.
+@@ -3815,12 +3877,15 @@
+ {
+       int err = 0;
+ 
+-      netif_addr_lock_bh(to);
+-      err = __dev_addr_sync(&to->uc_list, &to->uc_count,
+-                            &from->uc_list, &from->uc_count);
++      ASSERT_RTNL();
++
++      if (to->addr_len != from->addr_len)
++              return -EINVAL;
++
++      err = __hw_addr_sync(&to->uc_list, &to->uc_count,
++                           &from->uc_list, &from->uc_count, to->addr_len);
+       if (!err)
+               __dev_set_rx_mode(to);
+-      netif_addr_unlock_bh(to);
+       return err;
+ }
+ EXPORT_SYMBOL(dev_unicast_sync);
+@@ -3836,18 +3901,33 @@
+  */
+ void dev_unicast_unsync(struct net_device *to, struct net_device *from)
+ {
+-      netif_addr_lock_bh(from);
+-      netif_addr_lock(to);
++      ASSERT_RTNL();
+ 
+-      __dev_addr_unsync(&to->uc_list, &to->uc_count,
+-                        &from->uc_list, &from->uc_count);
+-      __dev_set_rx_mode(to);
++      if (to->addr_len != from->addr_len)
++              return;
+ 
+-      netif_addr_unlock(to);
+-      netif_addr_unlock_bh(from);
++      __hw_addr_unsync(&to->uc_list, &to->uc_count,
++                       &from->uc_list, &from->uc_count, to->addr_len);
++      __dev_set_rx_mode(to);
+ }
+ EXPORT_SYMBOL(dev_unicast_unsync);
+ 
++static void dev_unicast_flush(struct net_device *dev)
++{
++      /* rtnl_mutex must be held here */
++
++      __hw_addr_flush(&dev->uc_list);
++      dev->uc_count = 0;
++}
++
++static void dev_unicast_init(struct net_device *dev)
++{
++      /* rtnl_mutex must be held here */
++
++      INIT_LIST_HEAD(&dev->uc_list);
++}
++
++
+ static void __dev_addr_discard(struct dev_addr_list **list)
+ {
+       struct dev_addr_list *tmp;
+@@ -3866,9 +3946,6 @@
+ {
+       netif_addr_lock_bh(dev);
+ 
+-      __dev_addr_discard(&dev->uc_list);
+-      dev->uc_count = 0;
+-
+       __dev_addr_discard(&dev->mc_list);
+       dev->mc_count = 0;
+ 
+@@ -4459,6 +4536,7 @@
+       /*
+        *      Flush the unicast and multicast chains
+        */
++      dev_unicast_flush(dev);
+       dev_addr_discard(dev);
+ 
+       if (dev->netdev_ops->ndo_uninit)
+@@ -4975,6 +5053,8 @@
+       dev = (struct net_device *)
+               (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+       dev->padded = (char *)dev - (char *)p;
++      dev_unicast_init(dev);
++
+       dev_net_set(dev, &init_net);
+ 
+       dev->_tx = tx;
+@@ -5173,6 +5253,7 @@
+       /*
+        *      Flush the unicast and multicast chains
+        */
++      dev_unicast_flush(dev);
+       dev_addr_discard(dev);
+ 
+       netdev_unregister_kobject(dev);
diff --git a/debian/patches/libdde_workqueue.patch b/debian/patches/libdde_workqueue.patch
new file mode 100644
index 0000000..724f12a
--- /dev/null
+++ b/debian/patches/libdde_workqueue.patch
@@ -0,0 +1,55 @@
+diff --git a/libdde_linux26/contrib/include/linux/workqueue.h b/libdde_linux26/contrib/include/linux/workqueue.h
+index 3cd51e5..cf24c20 100644
+--- a/libdde_linux26/contrib/include/linux/workqueue.h
++++ b/libdde_linux26/contrib/include/linux/workqueue.h
+@@ -41,6 +41,11 @@ struct delayed_work {
+       struct timer_list timer;
+ };
+ 
++static inline struct delayed_work *to_delayed_work(struct work_struct *work)
++{
++      return container_of(work, struct delayed_work, work);
++}
++
+ struct execute_work {
+       struct work_struct work;
+ };
+@@ -89,7 +94,7 @@ struct execute_work {
+ /*
+  * initialize all of a work item in one go
+  *
+- * NOTE! No point in using "atomic_long_set()": useing a direct
++ * NOTE! No point in using "atomic_long_set()": using a direct
+  * assignment of the work data initializer allows the compiler
+  * to generate better code.
+  */
+@@ -202,6 +207,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+ 
+ extern void flush_workqueue(struct workqueue_struct *wq);
+ extern void flush_scheduled_work(void);
++extern void flush_delayed_work(struct delayed_work *work);
+ 
+ extern int schedule_work(struct work_struct *work);
+ extern int schedule_work_on(int cpu, struct work_struct *work);
+@@ -235,6 +241,21 @@ static inline int cancel_delayed_work(struct delayed_work *work)
+       return ret;
+ }
+ 
++/*
++ * Like above, but uses del_timer() instead of del_timer_sync(). This means
++ * that if it returns 0, the timer function may still be running and the
++ * queueing may still be in progress.
++ */
++static inline int __cancel_delayed_work(struct delayed_work *work)
++{
++      int ret;
++
++      ret = del_timer(&work->timer);
++      if (ret)
++              work_clear_pending(&work->work);
++      return ret;
++}
++
+ extern int cancel_delayed_work_sync(struct delayed_work *work);
+ 
+ /* Obsolete. use cancel_delayed_work_sync() */
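
The to_delayed_work() helper added above lets a work handler recover the
delayed_work it is embedded in, in the usual container_of() style. A
minimal hypothetical sketch; struct foo and foo_work_fn() are illustrative
names, not part of the patch:

    struct foo {
            struct delayed_work dwork;
            int more_to_do;
    };

    static void foo_work_fn(struct work_struct *work)
    {
            /* the handler is handed the inner work_struct; climb back
             * to the containing delayed_work, then to the driver state */
            struct delayed_work *dwork = to_delayed_work(work);
            struct foo *f = container_of(dwork, struct foo, dwork);

            if (f->more_to_do)
                    schedule_delayed_work(&f->dwork, HZ);
    }
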
diff --git a/debian/patches/series b/debian/patches/series
index 34acbbc..d650916 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -30,3 +30,24 @@ posix-sigcodes.patch
 random-default-fast.patch
 libexec.patch
 run.patch
+libdde_netdev_tx_t.patch
+libdde_pci-needs_freset.patch
+libdde_addr_list.patch
+libdde_ucast_list.patch
+libdde_addr_fix.patch
+libdde_group_addr.patch
+libdde_rculist.patch
+libdde_dma_head.patch
+libdde_mdio.patch
+libdde_ethoc.patch
+libdde_phy.patch
+libdde_pci_ids.h.patch
+libdde_ethtool.patch
+libdde_workqueue.patch
+libdde_trans_start.patch
+libdde_config.patch
+libdde_devres.patch
+libdde_pr_cont.patch
+libdde_fixes.patch
+libdde_rx_queue.patch
+libdde_rcu.patch

-- 
Debian GNU Hurd packaging


