/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/netrom.h>

/*
 * This is where all NET/ROM frames pass, except for IP-over-NET/ROM which
 * cannot be fragmented in this manner.
 */
void nr_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char transport[NR_TRANSPORT_LEN];
	int err, frontlen, len;

	if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
		/* Save a copy of the Transport Header */
		skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN);
		skb_pull(skb, NR_TRANSPORT_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
				return;

			skb_reserve(skbn, frontlen);

			len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Transport Header */
			skb_push(skbn, NR_TRANSPORT_LEN);
			skb_copy_to_linear_data(skbn, transport,
						NR_TRANSPORT_LEN);
			if (skb->len > 0)
				skbn->data[4] |= NR_MORE_FLAG;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */
	}

	nr_kick(sk);
}

/*
 * This procedure is passed a buffer descriptor for an iframe. It builds
 * the rest of the control part of the frame and then writes it out.
 */
static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
{
	struct nr_sock *nr = nr_sk(sk);

	if (skb == NULL)
		return;

	skb->data[2] = nr->vs;
	skb->data[3] = nr->vr;

	if (nr->condition & NR_COND_OWN_RX_BUSY)
		skb->data[4] |= NR_CHOKE_FLAG;

	nr_start_idletimer(sk);

	nr_transmit_buffer(sk, skb);
}

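/*
 * Retransmit the oldest unacknowledged I-frame (typically in response to
 * a NAK from the peer). The frame at the head of the ack queue is cloned,
 * the clone is re-stamped with the current sequence numbers and sent,
 * while the original stays queued until it is acknowledged.
 */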
void nr_send_nak_frame(struct sock *sk)
{
	struct sk_buff *skb, *skbn;
	struct nr_sock *nr = nr_sk(sk);

	if ((skb = skb_peek(&nr->ack_queue)) == NULL)
		return;

	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
		return;

	skbn->data[2] = nr->va;
	skbn->data[3] = nr->vr;

	if (nr->condition & NR_COND_OWN_RX_BUSY)
		skbn->data[4] |= NR_CHOKE_FLAG;

	nr_transmit_buffer(sk, skbn);

	nr->condition &= ~NR_COND_ACK_PENDING;
	nr->vl = nr->vr;

	nr_stop_t1timer(sk);
}

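/*
 * Push any queued data out of the send window. Frames are pulled from the
 * socket write queue while window space remains, a clone of each is sent
 * as an I-frame, and the original is moved to the ack queue to await
 * acknowledgement.
 */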
void nr_kick(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);
	struct sk_buff *skb, *skbn;
	unsigned short start, end;

	if (nr->state != NR_STATE_3)
		return;

	if (nr->condition & NR_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

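	/*
	 * The next sequence number to send is vs while frames are still
	 * outstanding, or va once everything so far has been acknowledged;
	 * the window closes at va + window (modulo NR_MODULUS).
	 */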
	start = (skb_peek(&nr->ack_queue) == NULL) ? nr->va : nr->vs;
	end   = (nr->va + nr->window) % NR_MODULUS;

	if (start == end)
		return;

	nr->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full.
	 */

	/*
	 * Dequeue the frame and copy it.
	 */
	skb = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		/*
		 * Transmit the frame copy.
		 */
		nr_send_iframe(sk, skbn);

		nr->vs = (nr->vs + 1) % NR_MODULUS;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&nr->ack_queue, skb);

	} while (nr->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	nr->vl = nr->vr;
	nr->condition &= ~NR_COND_ACK_PENDING;

	if (!nr_t1timer_running(sk))
		nr_start_t1timer(sk);
}

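/*
 * Prepend the NET/ROM network header (source and destination AX.25
 * addresses plus the initial TTL) and hand the frame to the routing code.
 * If no route is found the frame is freed and the socket is disconnected
 * with ENETUNREACH.
 */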
void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
{
	struct nr_sock *nr = nr_sk(sk);
	unsigned char *dptr;

	/*
	 * Add the protocol byte and network header.
	 */
	dptr = skb_push(skb, NR_NETWORK_LEN);

	memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] &= ~AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	memcpy(dptr, &nr->dest_addr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] |= AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	*dptr++ = sysctl_netrom_network_ttl_initialiser;

	if (!nr_route_frame(skb, NULL)) {
		kfree_skb(skb);
		nr_disconnect(sk, ENETUNREACH);
	}
}

/*
 * The following routines are taken from page 170 of the 7th ARRL Computer
 * Networking Conference paper, as is the whole state machine.
 */

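/*
 * Start link establishment: clear the link conditions and the retry
 * counter, send a connect request, and run only the T1 timer while
 * waiting for the response.
 */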
void nr_establish_data_link(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	nr->condition = 0x00;
	nr->n2count = 0;

	nr_write_internal(sk, NR_CONNREQ);

	nr_stop_t2timer(sk);
	nr_stop_t4timer(sk);
	nr_stop_idletimer(sk);
	nr_start_t1timer(sk);
}

/*
 * Never send a NAK when we are CHOKEd.
 */
void nr_enquiry_response(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);
	int frametype = NR_INFOACK;

	if (nr->condition & NR_COND_OWN_RX_BUSY) {
		frametype |= NR_CHOKE_FLAG;
	} else {
		if (skb_peek(&nr->reseq_queue) != NULL)
			frametype |= NR_NAK_FLAG;
	}

	nr_write_internal(sk, frametype);

	nr->vl = nr->vr;
	nr->condition &= ~NR_COND_ACK_PENDING;
}

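/*
 * Process an acknowledgement carried in a received frame. If it acks
 * everything outstanding, stop T1 and clear the retry counter; if it acks
 * at least one new frame, restart T1 for those still unacknowledged.
 */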
void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
{
	struct nr_sock *nrom = nr_sk(sk);

	if (nrom->vs == nr) {
		nr_frames_acked(sk, nr);
		nr_stop_t1timer(sk);
		nrom->n2count = 0;
	} else {
		if (nrom->va != nr) {
			nr_frames_acked(sk, nr);
			nr_start_t1timer(sk);
		}
	}
}