25d24
< *		Ira Burton	:	Support for SOCK_CLUSTER
2996,3012d2994
<
< /* Ira Burton
<  * Determines if the packet should be sent due to the window, identical to TCP code
<  * except calls cluster functions.
<  */
< static __inline__ void __cluster_data_snd_check(struct sock *sk, struct sk_buff *skb)
< {
< 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
<
< 	if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
< 	    tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
< 	    cluster_write_xmit(sk, tp->nonagle))
< 		tcp_check_probe_timer(sk, tp);
< }
<
<
3022,3034d3003
< /* Ira Burton
<  * Attempts to send data and reclaim socket space, identical to TCP code
<  * except calls cluster functions.
<  */
< static __inline__ void cluster_data_snd_check(struct sock *sk)
< {
< 	struct sk_buff *skb = sk->tp_pinfo.af_tcp.send_head;
<
< 	if (skb != NULL)
< 		__cluster_data_snd_check(sk, skb);
< 	tcp_check_space(sk);
< }
<
3061,3088d3029
<
< /* Ira Burton
<  * Check if sending an ack is needed; this is identical to TCP
<  * code except it calls cluster functions.
<  */
< static __inline__ void __cluster_ack_snd_check(struct sock *sk, int ofo_possible)
< {
< 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
<
< 	/* More than one full frame received... */
< 	if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss
< 	     /* ... and right edge of window advances far enough.
< 	      * (tcp_recvmsg() will send ACK otherwise). Or...
< 	      */
< 	     && __tcp_select_window(sk) >= tp->rcv_wnd) ||
< 	    /* We ACK each frame or... */
< 	    tcp_in_quickack_mode(tp) ||
< 	    /* We have out of order data. */
< 	    (ofo_possible &&
< 	     skb_peek(&tp->out_of_order_queue) != NULL)) {
< 		/* Then ack it now */
< 		cluster_send_ack(sk);
< 	} else {
< 		/* Else, send delayed ack. */
< 		cluster_send_delayed_ack(sk);
< 	}
< }
3099,3112d3039
< /* Ira Burton
<  * Check to see if we have an ACK scheduled, and if so, see
<  * if we should send.
<  */
< static __inline__ void cluster_ack_snd_check(struct sock *sk)
< {
< 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
< 	if (!tcp_ack_scheduled(tp)) {
< 		/* We sent a data segment already. */
< 		return;
< 	}
< 	__cluster_ack_snd_check(sk, 1);
< }
3241,3265d3167
<
< /* Ira Burton
<  * Copies data from the skb to the iovec, doesn't do any checksumming.
<  */
< static __inline__ int cluster_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
< {
< 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
< 	int chunk = skb->len - hlen;
< 	int err = 0;
<
< 	local_bh_enable();
<
< 	err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
<
< 	if (!err) {
< 		tp->ucopy.len -= chunk;
< 		tp->copied_seq += chunk;
< 	}
<
< 	local_bh_disable();
< 	return err;
< }
<
<
3524,3665d3425
<
<
<
< /* Ira Burton
<  * Handles the messages once the connection is established. This is called when data
<  * is received via the device, not by user space code making a recv() call. Very similar
<  * to the TCP code except it calls cluster functions and doesn't support urgent data.
<  */
< int cluster_rcv_established(struct sock *sk, struct sk_buff *skb,
< 			    struct tcphdr *th, unsigned len)
< {
< 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
<
<
< 	tp->saw_tstamp = 0;
< 	/* pred_flags is 0xS?10 << 16 + snd_wnd
< 	 * if header_prediction is to be made
< 	 * 'S' will always be tp->tcp_header_len >> 2
< 	 * '?' will be 0 for the fast path, otherwise pred_flags is 0 to
< 	 * turn it off (when there are holes in the receive
< 	 * space for instance)
< 	 * PSH flag is ignored.
< 	 */
<
< 	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
< 	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
< 		int tcp_header_len = tp->tcp_header_len;
<
< 		/* Timestamp header prediction: tcp_header_len
< 		 * is automatically equal to th->doff*4 due to pred_flags
< 		 * match.
< 		 */
<
< 		/* Check timestamp */
< 		if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
< 			__u32 *ptr = (__u32 *)(th + 1);
<
<
< 			tp->saw_tstamp = 1;
< 			++ptr;
< 			tp->rcv_tsval = ntohl(*ptr);
< 			++ptr;
< 			tp->rcv_tsecr = ntohl(*ptr);
<
<
<
< 			/* Predicted packet is in window by definition.
< 			 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
< 			 * Hence, check seq<=rcv_wup reduces to:
< 			 */
< 			if (tp->rcv_nxt == tp->rcv_wup)
< 				tcp_store_ts_recent(tp);
< 		}
<
< 		if (len <= tcp_header_len) {
< 			/* Bulk data transfer: sender */
< 			if (len == tcp_header_len) {
< 				/* We know that such packets are checksummed
< 				 * on entry.
< 				 */
< 				tcp_ack(sk, skb, 0);
< 				__kfree_skb(skb);
< 				cluster_data_snd_check(sk);
< 				return 0;
< 			} else { /* Header too small */
< 				TCP_INC_STATS_BH(TcpInErrs);
< 				goto discard;
< 			}
< 		} else {
<
< 			int eaten = 0;
<
< 			if (tp->ucopy.task == current &&
< 			    tp->copied_seq == tp->rcv_nxt &&
< 			    len - tcp_header_len <= tp->ucopy.len &&
< 			    sk->lock.users) {
< 				__set_current_state(TASK_RUNNING);
<
< 				if (!cluster_copy_to_iovec(sk, skb, tcp_header_len)) {
< 					__skb_pull(skb, tcp_header_len);
< 					tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
< 					NET_INC_STATS_BH(TCPHPHitsToUser);
< 					eaten = 1;
< 				}
< 			}
< 			if (!eaten) {
< 				if ((int)skb->truesize > sk->forward_alloc)
< 					goto step5;
<
< 				NET_INC_STATS_BH(TCPHPHits);
<
< 				/* Bulk data transfer: receiver */
< 				__skb_pull(skb,tcp_header_len);
< 				__skb_queue_tail(&sk->receive_queue, skb);
< 				tcp_set_owner_r(skb, sk);
< 				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
< 			}
<
< 			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
< 				/* Well, only one small jumplet in fast path... */
< 				tcp_ack(sk, skb, FLAG_DATA);
< 				cluster_data_snd_check(sk);
< 				if (!tcp_ack_scheduled(tp))
< 					goto no_ack;
< 			}
<
< 			if (eaten) {
< 				if (tcp_in_quickack_mode(tp)) {
< 					cluster_send_ack(sk);
< 				} else {
< 					cluster_send_delayed_ack(sk);
< 				}
< 			} else {
< 				__cluster_ack_snd_check(sk, 0);
< 			}
<
< no_ack:
< 			if (eaten)
< 				__kfree_skb(skb);
< 			else
< 				sk->data_ready(sk, 0);
< 			return 0;
< 		}
< 	}
<
< step5:
< 	if(th->ack)
< 		tcp_ack(sk, skb, FLAG_SLOWPATH);
<
< 	tcp_data_queue(sk, skb);
<
< 	cluster_data_snd_check(sk);
< 	cluster_ack_snd_check(sk);
< 	return 0;
<
< discard:
< 	__kfree_skb(skb);
< 	return 0;
< }
<
<
<
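
For readers skimming the removed hunks: the core of the deleted __cluster_ack_snd_check() is the same ACK-now-versus-delayed-ACK decision TCP makes, only routed through cluster_send_ack()/cluster_send_delayed_ack(). The sketch below restates that decision as a standalone user-space C model under stated assumptions; the struct and field names (ack_state, selected_wnd, ...) are hypothetical stand-ins, not the kernel's tcp_opt, and the window selection is reduced to plain parameters.

/* Illustrative user-space model of the ACK decision in the deleted
 * __cluster_ack_snd_check(); names here are hypothetical, not kernel APIs. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct ack_state {
	uint32_t rcv_nxt;       /* next sequence number we expect            */
	uint32_t rcv_wup;       /* last point the window edge was advertised */
	uint32_t rcv_mss;       /* sender's estimated segment size           */
	uint32_t selected_wnd;  /* window we would advertise now             */
	uint32_t rcv_wnd;       /* window we advertised last time            */
	bool     quickack;      /* quick-ACK mode: ACK every frame           */
	bool     ofo_queued;    /* out-of-order data sitting in the queue    */
};

/* Return true if an ACK should go out now, false if it may be delayed. */
static bool ack_now(const struct ack_state *s, bool ofo_possible)
{
	/* More than one full frame received and the advertised window
	 * edge would advance far enough to be worth reporting... */
	if (s->rcv_nxt - s->rcv_wup > s->rcv_mss &&
	    s->selected_wnd >= s->rcv_wnd)
		return true;

	/* ...or we are ACKing each frame... */
	if (s->quickack)
		return true;

	/* ...or there is out-of-order data the peer should learn about. */
	if (ofo_possible && s->ofo_queued)
		return true;

	return false;	/* otherwise a delayed ACK suffices */
}

int main(void)
{
	struct ack_state s = {
		.rcv_nxt = 4000, .rcv_wup = 1000, .rcv_mss = 1460,
		.selected_wnd = 65535, .rcv_wnd = 65535,
		.quickack = false, .ofo_queued = false,
	};

	printf("two full frames pending: %s\n", ack_now(&s, true) ? "ack now" : "delay ack");

	s.rcv_nxt = s.rcv_wup + 100;	/* only a sliver of new data */
	printf("less than one frame    : %s\n", ack_now(&s, true) ? "ack now" : "delay ack");
	return 0;
}

Delaying the ACK in the quiet case lets it be piggybacked on outgoing data or cover several segments at once, which is why both the stock TCP path and this removed cluster path only force an immediate ACK when the peer genuinely needs the information (window advance, quick-ACK mode, or out-of-order data).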