author    Allan Stephens <allan.stephens@windriver.com>  2011-10-26 15:33:44 -0400
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2012-02-06 16:59:17 -0500
commit    8a275a6a30ba871eb34ea41c1fbb507039f4c0dc (patch)
tree      249a3987649b2e7c35c5099071ecaf1355308c80 /net/tipc/bcast.c
parent    57732560d1aa7d454d10e557f8959d19d1454174 (diff)
tipc: Fix node lock reclamation issues in broadcast link reception
Fixes a pair of problems in broadcast link message reception code relating
to the reclamation of the node lock after consuming an in-sequence message:

1) Now retests to see if the sending node is still up after reclaiming the
   node lock, and bails out if it is non-operational.

2) Now manipulates the node's deferred message queue only after reclaiming
   the node lock, rather than using queue head pointer information that was
   cached previously.

Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
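For illustration only, the following is a minimal userspace sketch of the locking
discipline the patch enforces: after dropping the node lock to deliver a message,
re-acquire the lock, re-check that the peer is still operational, and re-read the
deferred queue head under the lock instead of trusting a pointer cached before the
lock was released. The types, fields, and helper names below are simplified
stand-ins, not the actual TIPC definitions.

/*
 * Sketch of the "re-validate after reclaiming the lock" pattern.
 * Not TIPC code; pthread mutex stands in for the node spinlock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct msg {
	struct msg *next;
	unsigned int seqno;
};

struct node {
	pthread_mutex_t lock;
	bool up;                   /* peer still operational? */
	unsigned int last_in;      /* last in-sequence seqno consumed */
	struct msg *deferred_head; /* out-of-order messages held back */
};

static void deliver(struct msg *m)
{
	/* hand message to upper layer (placeholder) */
	(void)m;
}

static void recv_in_sequence(struct node *n, struct msg *m)
{
	pthread_mutex_lock(&n->lock);

	while (m) {
		n->last_in = m->seqno;

		/* Deliver without holding the node lock. */
		pthread_mutex_unlock(&n->lock);
		deliver(m);
		pthread_mutex_lock(&n->lock);

		/* Re-validate peer state now that the lock is held again. */
		if (!n->up)
			break;

		/* Re-read the queue head; it may have changed while unlocked. */
		m = n->deferred_head;
		if (!m || m->seqno != n->last_in + 1)
			break;
		n->deferred_head = m->next;
	}

	pthread_mutex_unlock(&n->lock);
}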
Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r--  net/tipc/bcast.c | 58
1 file changed, 40 insertions(+), 18 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7342abc2cfa1..e7df313020ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -474,7 +474,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
struct tipc_node *node;
u32 next_in;
u32 seqno;
- struct sk_buff *deferred;
+ int deferred;
/* Screen out unwanted broadcast messages */
@@ -489,6 +489,8 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
if (unlikely(!node->bclink.supported))
goto unlock;
+ /* Handle broadcast protocol message */
+
if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
if (msg_type(msg) != STATE_MSG)
goto unlock;
@@ -513,11 +515,11 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
/* Handle in-sequence broadcast message */
-receive:
- next_in = mod(node->bclink.last_in + 1);
seqno = msg_seqno(msg);
+ next_in = mod(node->bclink.last_in + 1);
if (likely(seqno == next_in)) {
+receive:
bcl->stats.recv_info++;
node->bclink.last_in++;
bclink_set_gap(node);
@@ -551,23 +553,40 @@ receive:
buf_discard(buf);
}
buf = NULL;
+
+ /* Determine new synchronization state */
+
tipc_node_lock(node);
- deferred = node->bclink.deferred_head;
- if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
- buf = deferred;
- msg = buf_msg(buf);
- node->bclink.deferred_head = deferred->next;
- goto receive;
- }
- } else if (less(next_in, seqno)) {
+ if (unlikely(!tipc_node_is_up(node)))
+ goto unlock;
+
+ if (!node->bclink.deferred_head)
+ goto unlock;
+
+ msg = buf_msg(node->bclink.deferred_head);
+ seqno = msg_seqno(msg);
+ next_in = mod(next_in + 1);
+ if (seqno != next_in)
+ goto unlock;
+
+ /* Take in-sequence message from deferred queue & deliver it */
+
+ buf = node->bclink.deferred_head;
+ node->bclink.deferred_head = buf->next;
+ goto receive;
+ }
+
+ /* Handle out-of-sequence broadcast message */
+
+ if (less(next_in, seqno)) {
u32 gap_after = node->bclink.gap_after;
u32 gap_to = node->bclink.gap_to;
- if (tipc_link_defer_pkt(&node->bclink.deferred_head,
- &node->bclink.deferred_tail,
- buf)) {
+ deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
+ &node->bclink.deferred_tail,
+ buf);
+ if (deferred) {
node->bclink.nack_sync++;
- bcl->stats.deferred_recv++;
if (seqno == mod(gap_after + 1))
node->bclink.gap_after = seqno;
else if (less(gap_after, seqno) && less(seqno, gap_to))
@@ -579,9 +598,12 @@ receive:
bclink_send_nack(node);
bclink_set_gap(node);
}
- } else {
- bcl->stats.duplicates++;
- }
+ } else
+ deferred = 0;
+
+ if (deferred)
+ bcl->stats.deferred_recv++;
+
unlock:
tipc_node_unlock(node);
exit: