author     Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>   2008-12-22 17:34:26 +1100
committer  Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>   2008-12-22 17:34:26 +1100
commit     27a0464a6cb837d3a90b6e69365dfc01cb0dff2f (patch)
tree       361f00f4d4ad4c01b6144c52004bee2b948742ad /net
parent     4d9d4ebf5de848e3450e23e4db9ac74e23e5daa6 (diff)
parent     3d44cc3e01ee1b40317f79ed54324e25c4f848df (diff)
[XFS] Fix merge conflict in fs/xfs/xfs_rename.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	fs/xfs/xfs_rename.c

Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Diffstat (limited to 'net')
-rw-r--r--  net/atm/svc.c                        6
-rw-r--r--  net/can/af_can.c                    68
-rw-r--r--  net/can/bcm.c                        7
-rw-r--r--  net/core/netpoll.c                   2
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c     2
-rw-r--r--  net/ipv4/tcp_output.c               22
-rw-r--r--  net/ipv4/tcp_vegas.c                82
-rw-r--r--  net/ipv6/ndisc.c                     7
-rw-r--r--  net/mac80211/sta_info.c              2
-rw-r--r--  net/netfilter/xt_socket.c            2
-rw-r--r--  net/netlabel/netlabel_unlabeled.c   48
-rw-r--r--  net/phonet/pep-gprs.c               27
-rw-r--r--  net/phonet/pn_netlink.c              3
-rw-r--r--  net/sched/sch_netem.c                3
-rw-r--r--  net/xfrm/xfrm_policy.c               1
15 files changed, 146 insertions(+), 136 deletions(-)
diff --git a/net/atm/svc.c b/net/atm/svc.c
index de1e4f2f3a43..8fb54dc870b3 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -293,7 +293,10 @@ static int svc_listen(struct socket *sock,int backlog)
error = -EINVAL;
goto out;
}
- vcc_insert_socket(sk);
+ if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
+ error = -EADDRINUSE;
+ goto out;
+ }
set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
@@ -307,6 +310,7 @@ static int svc_listen(struct socket *sock,int backlog)
goto out;
}
set_bit(ATM_VF_LISTEN,&vcc->flags);
+ vcc_insert_socket(sk);
sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
error = -sk->sk_err;
out:
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7d4d2b3c137e..3dadb338addd 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
return n ? d : NULL;
}
+/**
+ * find_rcv_list - determine optimal filterlist inside device filter struct
+ * @can_id: pointer to CAN identifier of a given can_filter
+ * @mask: pointer to CAN mask of a given can_filter
+ * @d: pointer to the device filter struct
+ *
+ * Description:
+ * Returns the optimal filterlist to reduce the filter handling in the
+ * receive path. This function is called by service functions that need
+ * to register or unregister a can_filter in the filter lists.
+ *
+ * A filter matches in general, when
+ *
+ * <received_can_id> & mask == can_id & mask
+ *
+ * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
+ * relevant bits for the filter.
+ *
+ * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
+ * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
+ * there is a special filterlist and a special rx path filter handling.
+ *
+ * Return:
+ * Pointer to optimal filterlist for the given can_id/mask pair.
+ * Consistency checked mask.
+ * Reduced can_id to have a preprocessed filter compare value.
+ */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
struct dev_rcv_lists *d)
{
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
- /* filter error frames */
+ /* filter for error frames in extra filterlist */
if (*mask & CAN_ERR_FLAG) {
- /* clear CAN_ERR_FLAG in list entry */
+ /* clear CAN_ERR_FLAG in filter entry */
*mask &= CAN_ERR_MASK;
return &d->rx[RX_ERR];
}
- /* ensure valid values in can_mask */
- if (*mask & CAN_EFF_FLAG)
- *mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG);
- else
- *mask &= (CAN_SFF_MASK | CAN_RTR_FLAG);
+ /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
+
+#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
+
+ /* ensure valid values in can_mask for 'SFF only' frame filtering */
+ if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
+ *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
/* reduce condition testing at receive time */
*can_id &= *mask;
@@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
if (!(*mask))
return &d->rx[RX_ALL];
- /* use extra filterset for the subscription of exactly *ONE* can_id */
- if (*can_id & CAN_EFF_FLAG) {
- if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) {
- /* RFC: a use-case for hash-tables in the future? */
- return &d->rx[RX_EFF];
+ /* extra filterlists for the subscription of a single non-RTR can_id */
+ if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
+ && !(*can_id & CAN_RTR_FLAG)) {
+
+ if (*can_id & CAN_EFF_FLAG) {
+ if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
+ /* RFC: a future use-case for hash-tables? */
+ return &d->rx[RX_EFF];
+ }
+ } else {
+ if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
+ return &d->rx_sff[*can_id];
}
- } else {
- if (*mask == CAN_SFF_MASK)
- return &d->rx_sff[*can_id];
}
/* default: filter via can_id/can_mask */
@@ -589,7 +622,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
}
}
- /* check CAN_ID specific entries */
+ /* check filterlists for single non-RTR can_ids */
+ if (can_id & CAN_RTR_FLAG)
+ return matches;
+
if (can_id & CAN_EFF_FLAG) {
hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
if (r->can_id == can_id) {
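
The kernel-doc added for find_rcv_list() above boils down to one rule: a filter matches when <received_can_id> & mask == can_id & mask, and a single-id SFF subscription now lands in the per-id rx_sff[] list only when the mask also covers the EFF and RTR flag bits. The snippet below is an illustration only, not part of the patch; the id/mask values are made up, and it assumes nothing beyond the canid_t type and CAN_* constants from <linux/can.h>.

#include <stdio.h>
#include <linux/can.h>

static int can_filter_match(canid_t rcv_id, canid_t can_id, canid_t mask)
{
	/* A filter matches when <received_can_id> & mask == can_id & mask. */
	return (rcv_id & mask) == (can_id & mask);
}

int main(void)
{
	/* Hypothetical subscription: exactly one SFF id, 0x123, data frames only.
	 * With this mask the patched find_rcv_list() files it in d->rx_sff[0x123]. */
	canid_t can_id = 0x123;
	canid_t mask   = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG;

	printf("data frame 0x123 matches: %d\n",
	       can_filter_match(0x123, can_id, mask));                /* 1 */
	printf("RTR frame 0x123 matches:  %d\n",
	       can_filter_match(0x123 | CAN_RTR_FLAG, can_id, mask)); /* 0 */
	return 0;
}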
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d0dd382001e2..da0d426c0ce4 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -64,10 +64,11 @@
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
/* get best masking value for can_rx_register() for a given single can_id */
-#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \
- (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK))
+#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
+ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-#define CAN_BCM_VERSION "20080415"
+#define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO
"can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 6c7af390be0a..dadac6281f20 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -133,9 +133,11 @@ static int poll_one_napi(struct netpoll_info *npinfo,
npinfo->rx_flags |= NETPOLL_RX_DROP;
atomic_inc(&trapped);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
work = napi->poll(napi, budget);
+ clear_bit(NAPI_STATE_NPSVC, &napi->state);
atomic_dec(&trapped);
npinfo->rx_flags &= ~NETPOLL_RX_DROP;
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index bea54a685109..8d489e746b21 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -61,7 +61,7 @@ static struct
static struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
- .lock = __RW_LOCK_UNLOCKED(__nat_table.lock),
+ .lock = __RW_LOCK_UNLOCKED(nat_table.lock),
.me = THIS_MODULE,
.af = AF_INET,
};
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 85b07eba1879..fe3b4bdfd251 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -722,8 +722,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
- if (skb->len <= mss_now || !sk_can_gso(sk) ||
- tcp_urg_mode(tcp_sk(sk))) {
+ if (skb->len <= mss_now || !sk_can_gso(sk)) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
@@ -1029,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
/* Compute the current effective MSS, taking SACKs and IP options,
* and even PMTU discovery events into account.
- *
- * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
- * cannot be large. However, taking into account rare use of URG, this
- * is not a big flaw.
*/
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
@@ -1047,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
mss_now = tp->mss_cache;
- if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
+ if (large_allowed && sk_can_gso(sk))
doing_tso = 1;
if (dst) {
@@ -1164,9 +1159,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
{
int tso_segs = tcp_skb_pcount(skb);
- if (!tso_segs ||
- (tso_segs > 1 && (tcp_skb_mss(skb) != mss_now ||
- tcp_urg_mode(tcp_sk(sk))))) {
+ if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
tcp_set_skb_tso_segs(sk, skb, mss_now);
tso_segs = tcp_skb_pcount(skb);
}
@@ -1519,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
* send_head. This happens as incoming acks open up the remote
* window for us.
*
+ * LARGESEND note: !tcp_urg_mode is overkill, only frames between
+ * snd_up-64k-mss .. snd_up cannot be large. However, taking into
+ * account rare use of URG, this is not a big flaw.
+ *
* Returns 1, if no segments are in flight and we have queued segments, but
* cannot send anything now because of SWS or another problem.
*/
@@ -1570,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
}
limit = mss_now;
- if (tso_segs > 1)
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
cwnd_quota);
@@ -1619,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
*/
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
+ struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
unsigned int tso_segs, cwnd_quota;
@@ -1633,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
BUG_ON(!tso_segs);
limit = mss_now;
- if (tso_segs > 1)
+ if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
cwnd_quota);
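
The LARGESEND note reinstated above explains why the urgent-mode test moved: only the small window of data around snd_up must avoid large segments, so the check now sits in the write path instead of disabling TSO segment accounting altogether. The toy program below is not kernel code; every helper is a made-up stand-in, and it only mimics the shape of the new "tso_segs > 1 && !tcp_urg_mode(tp)" condition.

#include <stdio.h>
#include <stdbool.h>

struct toy_tp { bool urg_mode; };

static bool tcp_urg_mode(const struct toy_tp *tp) { return tp->urg_mode; }

/* Stand-in for tcp_mss_split_point(): pretend we may send 4 full segments. */
static unsigned int mss_split_point(unsigned int mss_now) { return 4 * mss_now; }

static unsigned int xmit_limit(const struct toy_tp *tp, unsigned int tso_segs,
			       unsigned int mss_now)
{
	unsigned int limit = mss_now;

	/* After this patch the urgent-mode check lives here, in the write path. */
	if (tso_segs > 1 && !tcp_urg_mode(tp))
		limit = mss_split_point(mss_now);
	return limit;
}

int main(void)
{
	struct toy_tp normal = { .urg_mode = false }, urgent = { .urg_mode = true };

	printf("normal: limit = %u bytes\n", xmit_limit(&normal, 3, 1460)); /* 5840 */
	printf("urgent: limit = %u bytes\n", xmit_limit(&urgent, 3, 1460)); /* 1460 */
	return 0;
}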
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 14504dada116..a453aac91bd3 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -40,18 +40,14 @@
#include "tcp_vegas.h"
-/* Default values of the Vegas variables, in fixed-point representation
- * with V_PARAM_SHIFT bits to the right of the binary point.
- */
-#define V_PARAM_SHIFT 1
-static int alpha = 2<<V_PARAM_SHIFT;
-static int beta = 4<<V_PARAM_SHIFT;
-static int gamma = 1<<V_PARAM_SHIFT;
+static int alpha = 2;
+static int beta = 4;
+static int gamma = 1;
module_param(alpha, int, 0644);
-MODULE_PARM_DESC(alpha, "lower bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(alpha, "lower bound of packets in network");
module_param(beta, int, 0644);
-MODULE_PARM_DESC(beta, "upper bound of packets in network (scale by 2)");
+MODULE_PARM_DESC(beta, "upper bound of packets in network");
module_param(gamma, int, 0644);
MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
@@ -172,49 +168,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
return;
}
- /* The key players are v_beg_snd_una and v_beg_snd_nxt.
- *
- * These are so named because they represent the approximate values
- * of snd_una and snd_nxt at the beginning of the current RTT. More
- * precisely, they represent the amount of data sent during the RTT.
- * At the end of the RTT, when we receive an ACK for v_beg_snd_nxt,
- * we will calculate that (v_beg_snd_nxt - v_beg_snd_una) outstanding
- * bytes of data have been ACKed during the course of the RTT, giving
- * an "actual" rate of:
- *
- * (v_beg_snd_nxt - v_beg_snd_una) / (rtt duration)
- *
- * Unfortunately, v_beg_snd_una is not exactly equal to snd_una,
- * because delayed ACKs can cover more than one segment, so they
- * don't line up nicely with the boundaries of RTTs.
- *
- * Another unfortunate fact of life is that delayed ACKs delay the
- * advance of the left edge of our send window, so that the number
- * of bytes we send in an RTT is often less than our cwnd will allow.
- * So we keep track of our cwnd separately, in v_beg_snd_cwnd.
- */
-
if (after(ack, vegas->beg_snd_nxt)) {
/* Do the Vegas once-per-RTT cwnd adjustment. */
- u32 old_wnd, old_snd_cwnd;
-
-
- /* Here old_wnd is essentially the window of data that was
- * sent during the previous RTT, and has all
- * been acknowledged in the course of the RTT that ended
- * with the ACK we just received. Likewise, old_snd_cwnd
- * is the cwnd during the previous RTT.
- */
- old_wnd = (vegas->beg_snd_nxt - vegas->beg_snd_una) /
- tp->mss_cache;
- old_snd_cwnd = vegas->beg_snd_cwnd;
/* Save the extent of the current window so we can use this
* at the end of the next RTT.
*/
- vegas->beg_snd_una = vegas->beg_snd_nxt;
vegas->beg_snd_nxt = tp->snd_nxt;
- vegas->beg_snd_cwnd = tp->snd_cwnd;
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
@@ -252,22 +212,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
*
* This is:
* (actual rate in segments) * baseRTT
- * We keep it as a fixed point number with
- * V_PARAM_SHIFT bits to the right of the binary point.
*/
- target_cwnd = ((u64)old_wnd * vegas->baseRTT);
- target_cwnd <<= V_PARAM_SHIFT;
- do_div(target_cwnd, rtt);
+ target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
/* Calculate the difference between the window we had,
* and the window we would like to have. This quantity
* is the "Diff" from the Arizona Vegas papers.
- *
- * Again, this is a fixed point number with
- * V_PARAM_SHIFT bits to the right of the binary
- * point.
*/
- diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
+ diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
if (diff > gamma && tp->snd_ssthresh > 2 ) {
/* Going too fast. Time to slow down
@@ -282,16 +234,13 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
* truncation robs us of full link
* utilization.
*/
- tp->snd_cwnd = min(tp->snd_cwnd,
- ((u32)target_cwnd >>
- V_PARAM_SHIFT)+1);
+ tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
/* Slow start. */
tcp_slow_start(tp);
} else {
/* Congestion avoidance. */
- u32 next_snd_cwnd;
/* Figure out where we would like cwnd
* to be.
@@ -300,32 +249,25 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
/* The old window was too fast, so
* we slow down.
*/
- next_snd_cwnd = old_snd_cwnd - 1;
+ tp->snd_cwnd--;
} else if (diff < alpha) {
/* We don't have enough extra packets
* in the network, so speed up.
*/
- next_snd_cwnd = old_snd_cwnd + 1;
+ tp->snd_cwnd++;
} else {
/* Sending just as fast as we
* should be.
*/
- next_snd_cwnd = old_snd_cwnd;
}
-
- /* Adjust cwnd upward or downward, toward the
- * desired value.
- */
- if (next_snd_cwnd > tp->snd_cwnd)
- tp->snd_cwnd++;
- else if (next_snd_cwnd < tp->snd_cwnd)
- tp->snd_cwnd--;
}
if (tp->snd_cwnd < 2)
tp->snd_cwnd = 2;
else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
tp->snd_cwnd = tp->snd_cwnd_clamp;
+
+ tp->snd_ssthresh = tcp_current_ssthresh(sk);
}
/* Wipe the slate clean for the next RTT. */
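
With the V_PARAM_SHIFT fixed-point scaling removed, the Vegas terms in the hunks above are plain integer segment counts: target_cwnd = snd_cwnd * baseRTT / rtt and diff = snd_cwnd * (rtt - baseRTT) / baseRTT. The numbers below are made up purely to illustrate the arithmetic (any time unit works as long as rtt and baseRTT share it); with the new module defaults alpha=2 and beta=4, a diff of 5 would make the congestion-avoidance branch step snd_cwnd down by one.

#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 20;   /* segments */
	unsigned int baseRTT  = 100;  /* same time unit as rtt */
	unsigned int rtt      = 125;

	unsigned int target_cwnd = snd_cwnd * baseRTT / rtt;       /* 16 */
	unsigned int diff = snd_cwnd * (rtt - baseRTT) / baseRTT;  /*  5 */

	/* diff (5) > beta (4): more than beta extra segments are queued in the
	 * network, so the congestion-avoidance branch decrements snd_cwnd. */
	printf("target_cwnd=%u diff=%u\n", target_cwnd, diff);
	return 0;
}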
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 172438320eec..d0f54d18e19b 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -912,8 +912,13 @@ static void ndisc_recv_na(struct sk_buff *skb)
is invalid, but ndisc specs say nothing
about it. It could be misconfiguration, or
an smart proxy agent tries to help us :-)
+
+ We should not print the error if NA has been
+ received from loopback - it is just our own
+ unsolicited advertisement.
*/
- ND_PRINTK1(KERN_WARNING
+ if (skb->pkt_type != PACKET_LOOPBACK)
+ ND_PRINTK1(KERN_WARNING
"ICMPv6 NA: someone advertises our address on %s!\n",
ifp->idev->dev->name);
in6_ifa_put(ifp);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7fef8ea1f5ec..d254446b85b5 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -99,7 +99,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
while (sta) {
- if (compare_ether_addr(sta->sta.addr, addr) == 0)
+ if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference(sta->hnext);
}
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 02a8fed21082..1acc089be7e9 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -141,7 +141,7 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par)
sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport, par->in, false);
if (sk != NULL) {
- bool wildcard = (inet_sk(sk)->rcv_saddr == 0);
+ bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0);
nf_tproxy_put_sock(sk);
if (wildcard)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e8a5c32b0f10..8c0308032178 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -562,7 +562,6 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
const struct in_addr *mask,
struct netlbl_audit *audit_info)
{
- int ret_val = 0;
struct netlbl_af4list *list_entry;
struct netlbl_unlhsh_addr4 *entry;
struct audit_buffer *audit_buf;
@@ -574,9 +573,10 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
&iface->addr4_list);
spin_unlock(&netlbl_unlhsh_lock);
- if (list_entry == NULL)
- ret_val = -ENOENT;
- entry = netlbl_unlhsh_addr4_entry(list_entry);
+ if (list_entry != NULL)
+ entry = netlbl_unlhsh_addr4_entry(list_entry);
+ else
+ entry = NULL;
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
audit_info);
@@ -587,19 +587,21 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
addr->s_addr, mask->s_addr);
if (dev != NULL)
dev_put(dev);
- if (entry && security_secid_to_secctx(entry->secid,
- &secctx,
- &secctx_len) == 0) {
+ if (entry != NULL &&
+ security_secid_to_secctx(entry->secid,
+ &secctx, &secctx_len) == 0) {
audit_log_format(audit_buf, " sec_obj=%s", secctx);
security_release_secctx(secctx, secctx_len);
}
- audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+ audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
audit_log_end(audit_buf);
}
- if (ret_val == 0)
- call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
- return ret_val;
+ if (entry == NULL)
+ return -ENOENT;
+
+ call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4);
+ return 0;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -623,7 +625,6 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
const struct in6_addr *mask,
struct netlbl_audit *audit_info)
{
- int ret_val = 0;
struct netlbl_af6list *list_entry;
struct netlbl_unlhsh_addr6 *entry;
struct audit_buffer *audit_buf;
@@ -634,9 +635,10 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
spin_lock(&netlbl_unlhsh_lock);
list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list);
spin_unlock(&netlbl_unlhsh_lock);
- if (list_entry == NULL)
- ret_val = -ENOENT;
- entry = netlbl_unlhsh_addr6_entry(list_entry);
+ if (list_entry != NULL)
+ entry = netlbl_unlhsh_addr6_entry(list_entry);
+ else
+ entry = NULL;
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
audit_info);
@@ -647,19 +649,21 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
addr, mask);
if (dev != NULL)
dev_put(dev);
- if (entry && security_secid_to_secctx(entry->secid,
- &secctx,
- &secctx_len) == 0) {
+ if (entry != NULL &&
+ security_secid_to_secctx(entry->secid,
+ &secctx, &secctx_len) == 0) {
audit_log_format(audit_buf, " sec_obj=%s", secctx);
security_release_secctx(secctx, secctx_len);
}
- audit_log_format(audit_buf, " res=%u", ret_val == 0 ? 1 : 0);
+ audit_log_format(audit_buf, " res=%u", entry != NULL ? 1 : 0);
audit_log_end(audit_buf);
}
- if (ret_val == 0)
- call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
- return ret_val;
+ if (entry == NULL)
+ return -ENOENT;
+
+ call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6);
+ return 0;
}
#endif /* IPv6 */
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 9978afbd9f2a..803eeef0aa85 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -155,12 +155,13 @@ static void gprs_data_ready(struct sock *sk, int len)
static void gprs_write_space(struct sock *sk)
{
struct gprs_dev *dev = sk->sk_user_data;
+ struct net_device *net = dev->net;
unsigned credits = pep_writeable(sk);
spin_lock_bh(&dev->tx_lock);
dev->tx_max = credits;
- if (credits > skb_queue_len(&dev->tx_queue))
- netif_wake_queue(dev->net);
+ if (credits > skb_queue_len(&dev->tx_queue) && netif_running(net))
+ netif_wake_queue(net);
spin_unlock_bh(&dev->tx_lock);
}
@@ -168,6 +169,23 @@ static void gprs_write_space(struct sock *sk)
* Network device callbacks
*/
+static int gprs_open(struct net_device *dev)
+{
+ struct gprs_dev *gp = netdev_priv(dev);
+
+ gprs_write_space(gp->sk);
+ return 0;
+}
+
+static int gprs_close(struct net_device *dev)
+{
+ struct gprs_dev *gp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ flush_work(&gp->tx_work);
+ return 0;
+}
+
static int gprs_xmit(struct sk_buff *skb, struct net_device *net)
{
struct gprs_dev *dev = netdev_priv(net);
@@ -254,6 +272,8 @@ static void gprs_setup(struct net_device *net)
net->tx_queue_len = 10;
net->destructor = free_netdev;
+ net->open = gprs_open;
+ net->stop = gprs_close;
net->hard_start_xmit = gprs_xmit; /* mandatory */
net->change_mtu = gprs_set_mtu;
net->get_stats = gprs_get_stats;
@@ -318,7 +338,6 @@ int gprs_attach(struct sock *sk)
dev->sk = sk;
printk(KERN_DEBUG"%s: attached\n", net->name);
- gprs_write_space(sk); /* kick off TX */
return net->ifindex;
out_rel:
@@ -341,7 +360,5 @@ void gprs_detach(struct sock *sk)
printk(KERN_DEBUG"%s: detached\n", net->name);
unregister_netdev(net);
- flush_scheduled_work();
sock_put(sk);
- skb_queue_purge(&dev->tx_queue);
}
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index b1770d66bc8d..242fe8f8c322 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -123,6 +123,7 @@ nla_put_failure:
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
+ struct net *net = sock_net(skb->sk);
struct phonet_device *pnd;
int dev_idx = 0, dev_start_idx = cb->args[0];
int addr_idx = 0, addr_start_idx = cb->args[1];
@@ -131,6 +132,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
list_for_each_entry(pnd, &pndevs.list, list) {
u8 addr;
+ if (!net_eq(dev_net(pnd->netdev), net))
+ continue;
if (dev_idx > dev_start_idx)
addr_start_idx = 0;
if (dev_idx++ < dev_start_idx)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a11959908d9a..98402f0efa47 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -46,9 +46,6 @@
layering other disciplines. It does not need to do bandwidth
control either since that can be handled by using token
bucket or other rate control.
-
- The simulator is limited by the Linux timer resolution
- and will create packet bursts on the HZ boundary (1ms).
*/
struct netem_sched_data {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 058f04f54b90..fb216c9adf86 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -817,6 +817,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
continue;
hlist_del(&pol->bydst);
hlist_del(&pol->byidx);
+ list_del(&pol->walk.all);
write_unlock_bh(&xfrm_policy_lock);
xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,