28d27
<  *		Ira Burton	:	Support for SOCK_CLUSTER
1024,1035d1022
< /* Ira Burton
<  * Sets the checksum values for the packet; the cluster protocol doesn't use checksums.
<  */
< inline void cluster_send_check(struct sock *sk, struct tcphdr *th, int len,
< 			       struct sk_buff *skb)
< {
< 	th->check = 0;
< 	skb->csum = 0;
< 	skb->ip_summed = CHECKSUM_UNNECESSARY;
< }
<
<
1614,1658d1600
<
<
< /* Ira Burton
<  * Received data from the device; if the connection is established, the cluster
<  * protocol will handle it.  Otherwise, let the TCP code worry about it.
<  */
< int cluster_do_rcv(struct sock *sk, struct sk_buff *skb)
< {
< 	IP_INC_STATS_BH(IpInDelivers);
<
< 	if (sk->state == TCP_ESTABLISHED) {
< 		if (cluster_rcv_established(sk, skb, skb->h.th, skb->len))
< 			goto reset;
< 		return 0;
< 	}
<
< 	if (sk->state == TCP_LISTEN) {
< 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
< 		if (!nsk)
< 			goto discard;
<
< 		if (nsk != sk) {
< 			if (tcp_child_process(sk, nsk, skb))
< 				goto reset;
< 			return 0;
< 		}
< 	}
<
< 	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
< 		goto reset;
< 	return 0;
<
< reset:
< 	tcp_v4_send_reset(skb);
< discard:
< 	kfree_skb(skb);
< 	/* Be careful here. If this function gets more complicated and
< 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
< 	 * might be destroyed here. This current version compiles correctly,
< 	 * but you have been warned.
< 	 */
< 	return 0;
< }
<
<
1774,1887d1715
<
< /* Ira Burton
<  * Called when data is read from the device.  As long as the message looks good,
<  * it is handled.
<  */
< int cluster_rcv(struct sk_buff *skb)
< {
<
< 	struct tcphdr *th;
< 	struct sock *sk;
< 	int ret;
<
< 	if (skb->pkt_type != PACKET_HOST)
< 		goto discard_it;
<
< 	/* Count it even if it's bad */
< 	TCP_INC_STATS_BH(TcpInSegs);
<
< 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
< 		goto discard_it;
<
< 	th = skb->h.th;
<
< 	if (th->doff < sizeof(struct tcphdr)/4)
< 		goto bad_packet;
< 	if (!pskb_may_pull(skb, th->doff*4))
< 		goto discard_it;
<
< 	skb->ip_summed = CHECKSUM_UNNECESSARY;
< 	th = skb->h.th;
< 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
< 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
< 				    skb->len - th->doff*4);
< 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
< 	TCP_SKB_CB(skb)->when = 0;
< 	TCP_SKB_CB(skb)->flags = skb->nh.iph->tos;
< 	TCP_SKB_CB(skb)->sacked = 0;
<
< 	sk = __tcp_v4_lookup(skb->nh.iph->saddr, th->source,
< 			     skb->nh.iph->daddr, ntohs(th->dest), tcp_v4_iif(skb));
< 	if (!sk)
< 		goto no_tcp_socket;
<
< process:
< 	if (sk->state == TCP_TIME_WAIT)
< 		goto do_time_wait;
<
< 	skb->dev = NULL;
<
< 	bh_lock_sock(sk);
< 	ret = 0;
< 	if (!sk->lock.users) {
< 		if (!tcp_prequeue(sk, skb))
< 			ret = cluster_do_rcv(sk, skb);
< 	} else
< 		sk_add_backlog(sk, skb);
< 	bh_unlock_sock(sk);
<
< 	sock_put(sk);
<
< 	return ret;
<
< no_tcp_socket:
< 	if (skb->len < (th->doff<<2)) {
< bad_packet:
<
< 		TCP_INC_STATS_BH(TcpInErrs);
< 	} else {
<
< 		tcp_v4_send_reset(skb);
< 	}
<
< discard_it:
< 	/* Discard frame. */
< 	kfree_skb(skb);
< 	return 0;
<
< discard_and_relse:
< 	sock_put(sk);
< 	goto discard_it;
<
< do_time_wait:
< 	if (skb->len < (th->doff<<2)) {
< 		TCP_INC_STATS_BH(TcpInErrs);
< 		goto discard_and_relse;
< 	}
< 	switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
< 					   skb, th, skb->len)) {
< 	case TCP_TW_SYN:
< 	{
< 		struct sock *sk2;
<
< 		sk2 = tcp_v4_lookup_listener(skb->nh.iph->daddr, ntohs(th->dest), tcp_v4_iif(skb));
< 		if (sk2 != NULL) {
< 			tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
< 			tcp_timewait_kill((struct tcp_tw_bucket *)sk);
< 			tcp_tw_put((struct tcp_tw_bucket *)sk);
< 			sk = sk2;
< 			goto process;
< 		}
< 		/* Fall through to ACK */
< 	}
< 	case TCP_TW_ACK:
< 		tcp_v4_timewait_ack(sk, skb);
< 		break;
< 	case TCP_TW_RST:
< 		goto no_tcp_socket;
< 	case TCP_TW_SUCCESS:;
< 	}
< 	goto discard_it;
< }
<
<
2061,2081d1888
< /* Ira Burton, Added for cluster */
< struct tcp_func cluster_specific = {
< 	cluster_ip_queue_xmit,
< 	cluster_send_check,
< 	tcp_v4_rebuild_header,
< 	tcp_v4_conn_request,
< 	tcp_v4_syn_recv_sock,
< 	tcp_v4_hash_connecting,
< 	tcp_v4_remember_stamp,
< 	sizeof(struct iphdr),
<
< 	ip_setsockopt,
< 	ip_getsockopt,
< 	v4_addr2sockaddr,
< 	sizeof(struct sockaddr_in)
< };
<
<
<
<
2127,2175d1933
< /* Ira Burton
<  * Initializes the socket to use the cluster protocol.  Similar to the
<  * TCP code except it sets the af_specific pointer to cluster_specific.
<  */
< static int cluster_init_sock(struct sock *sk)
< {
< 	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
<
< 	skb_queue_head_init(&tp->out_of_order_queue);
< 	tcp_init_xmit_timers(sk);
< 	tcp_prequeue_init(tp);
<
< 	tp->rto = TCP_TIMEOUT_INIT;
< 	tp->mdev = TCP_TIMEOUT_INIT;
<
< 	/* So many TCP implementations out there (incorrectly) count the
< 	 * initial SYN frame in their delayed-ACK and congestion control
< 	 * algorithms that we must have the following bandaid to talk
< 	 * efficiently to them.  -DaveM
< 	 */
< 	tp->snd_cwnd = 2;
<
< 	/* See draft-stevens-tcpca-spec-01 for discussion of the
< 	 * initialization of these values.
< 	 */
< 	tp->snd_ssthresh = 0x7fffffff;	/* Infinity */
< 	tp->snd_cwnd_clamp = ~0;
< 	tp->mss_cache = 536;
<
< 	tp->reordering = sysctl_tcp_reordering;
<
< 	sk->state = TCP_CLOSE;
<
< 	sk->write_space = tcp_write_space;
< 	sk->use_write_queue = 1;
<
< 	sk->tp_pinfo.af_tcp.af_specific = &cluster_specific;
<
< 	sk->sndbuf = sysctl_tcp_wmem[1];
< 	sk->rcvbuf = sysctl_tcp_rmem[1];
<
< 	atomic_inc(&tcp_sockets_allocated);
<
< 	return 0;
< }
<
<
<
2435,2456d2192
<
<
< /* Ira Burton, added for cluster. */
< struct proto cluster_prot = {
< 	name:		"CLUSTER",
< 	close:		tcp_close,
< 	connect:	tcp_v4_connect,
< 	disconnect:	tcp_disconnect,
< 	accept:		tcp_accept,
< 	ioctl:		tcp_ioctl,
< 	init:		cluster_init_sock,
< 	destroy:	tcp_v4_destroy_sock,
< 	shutdown:	tcp_shutdown,
< 	setsockopt:	tcp_setsockopt,
< 	getsockopt:	tcp_getsockopt,
< 	sendmsg:	cluster_sendmsg,
< 	recvmsg:	cluster_recvmsg,
< 	backlog_rcv:	cluster_do_rcv,
< 	hash:		tcp_v4_hash,
< 	unhash:		tcp_unhash,
< 	get_port:	tcp_v4_get_port,
< };
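
None of the hunks above show how cluster_prot is actually exposed to userspace. A minimal sketch of the likely wiring, assuming the rest of the patch defines a SOCK_CLUSTER socket type and registers it through the stock 2.4 inet_register_protosw() path: the field values, the reuse of inet_stream_ops, and the cluster_proto_register() helper below are illustrative assumptions, not code taken from the patch.

/*
 * Sketch only: a plausible inet_protosw entry for cluster_prot.
 * SOCK_CLUSTER, the reuse of inet_stream_ops, and cluster_proto_register()
 * are assumptions; the real registration is not part of this diff.
 */
#include <linux/net.h>
#include <linux/in.h>
#include <net/protocol.h>
#include <net/sock.h>

extern struct proto     cluster_prot;      /* defined in the hunk above */
extern struct proto_ops inet_stream_ops;   /* stream semantics, as TCP uses */

static struct inet_protosw cluster_protosw = {
	type:		SOCK_CLUSTER,		/* hypothetical 2nd argument to socket(2) */
	protocol:	IPPROTO_TCP,		/* cluster traffic rides on TCP segments */
	prot:		&cluster_prot,
	ops:		&inet_stream_ops,
	capability:	-1,			/* no extra capability required */
	no_check:	0,			/* cluster_send_check() zeroes checksums anyway */
	flags:		INET_PROTOSW_PERMANENT,
};

/* Would be called once at boot, e.g. from inet_init(). */
void cluster_proto_register(void)
{
	inet_register_protosw(&cluster_protosw);
}

With an entry like this in place, socket(AF_INET, SOCK_CLUSTER, 0) would bind a socket to cluster_prot, so sends and receives go through cluster_sendmsg()/cluster_recvmsg(), while connection setup, teardown, and port management reuse the stock TCP handlers listed in the struct above.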