/*
 *      X.25 Packet Layer release 002
 *
 *      This is ALPHA test software. This code may break your machine,
 *      randomly fail to work with new releases, misbehave and/or generally
 *      screw up. It might even work.
 *
 *      This code REQUIRES 2.1.15 or higher
 *
 *      This module:
 *              This module is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *      History
 *      X.25 001        Jonathan Naylor   Started coding.
 *      X.25 002        Jonathan Naylor   Centralised disconnection processing.
 *      mar/20/00       Daniela Squassoni Disabling/enabling of facilities
 *                                        negotiation.
 *      jun/24/01       Arnaldo C. Melo   use skb_queue_purge, cleanups
 *      apr/04/15       Shaun Pereira     Fast select with no
 *                                        restriction on response.
 */

#define pr_fmt(fmt) "X25: " fmt

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/x25.h>

/*
 *      This routine purges all of the queues of frames.
 */
void x25_clear_queues(struct sock *sk)
{
        struct x25_sock *x25 = x25_sk(sk);

        skb_queue_purge(&sk->sk_write_queue);
        skb_queue_purge(&x25->ack_queue);
        skb_queue_purge(&x25->interrupt_in_queue);
        skb_queue_purge(&x25->interrupt_out_queue);
        skb_queue_purge(&x25->fragment_queue);
}

/*
 * This routine purges the input queue of those frames that have been
 * acknowledged. This replaces the boxes labelled "V(a) <- N(r)" on the
 * SDL diagram.
 */
void x25_frames_acked(struct sock *sk, unsigned short nr)
{
        struct sk_buff *skb;
        struct x25_sock *x25 = x25_sk(sk);
        int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

        /*
         * Remove all the ack-ed frames from the ack queue.
         */
        if (x25->va != nr)
                while (skb_peek(&x25->ack_queue) && x25->va != nr) {
                        skb = skb_dequeue(&x25->ack_queue);
                        kfree_skb(skb);
                        x25->va = (x25->va + 1) % modulus;
                }
}
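
/*
 * Worked example (illustration only): with standard modulo-8
 * sequencing (X25_SMODULUS), va == 6 and an incoming nr == 1
 * acknowledges frames 6, 7 and 0, so three buffers are dequeued and
 * freed above and va wraps around to 1.  With extended sequencing the
 * same walk happens modulo 128 (X25_EMODULUS).
 */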

void x25_requeue_frames(struct sock *sk)
{
        struct sk_buff *skb, *skb_prev = NULL;

        /*
         * Requeue all the un-ack-ed frames on the output queue to be picked
         * up by x25_kick. This arrangement handles the possibility of an empty
         * output queue.
         */
        while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
                if (!skb_prev)
                        skb_queue_head(&sk->sk_write_queue, skb);
                else
                        skb_append(skb_prev, skb, &sk->sk_write_queue);
                skb_prev = skb;
        }
}
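
/*
 * Note: the first un-acked frame dequeued above goes back to the head
 * of sk_write_queue and each later one is appended directly after its
 * predecessor, so the original transmit order is preserved and the
 * requeued frames end up ahead of any not-yet-sent frames already
 * sitting on the write queue.
 */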

/*
 *      Validate that the value of nr is between va and vs. Return true or
 *      false for testing.
 */
int x25_validate_nr(struct sock *sk, unsigned short nr)
{
        struct x25_sock *x25 = x25_sk(sk);
        unsigned short vc = x25->va;
        int modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

        while (vc != x25->vs) {
                if (nr == vc)
                        return 1;
                vc = (vc + 1) % modulus;
        }

        return nr == x25->vs ? 1 : 0;
}
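
/*
 * Worked example (illustration only): with va == 2 and vs == 5 under
 * modulo-8 sequencing, the loop above accepts nr values 2, 3 and 4,
 * and the final comparison also accepts nr == 5; anything else,
 * including a value that has wrapped past vs, is rejected as outside
 * the window.
 */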

/*
 *  This routine is called when the packet layer internally generates a
 *  control frame.
 */
void x25_write_internal(struct sock *sk, int frametype)
{
        struct x25_sock *x25 = x25_sk(sk);
        struct sk_buff *skb;
        unsigned char  *dptr;
        unsigned char  facilities[X25_MAX_FAC_LEN];
        unsigned char  addresses[1 + X25_ADDR_LEN];
        unsigned char  lci1, lci2;
        /*
         *      Default safe frame size.
         */
        int len = X25_MAX_L2_LEN + X25_EXT_MIN_LEN;

        /*
         *      Adjust frame size.
         */
        switch (frametype) {
        case X25_CALL_REQUEST:
                len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
                break;
        case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
                if (x25->facilities.reverse & 0x80) {
                        len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
                } else {
                        len += 1 + X25_MAX_FAC_LEN;
                }
                break;
        case X25_CLEAR_REQUEST:
        case X25_RESET_REQUEST:
                len += 2;
                break;
        case X25_RR:
        case X25_RNR:
        case X25_REJ:
        case X25_CLEAR_CONFIRMATION:
        case X25_INTERRUPT_CONFIRMATION:
        case X25_RESET_CONFIRMATION:
                break;
        default:
                pr_err("invalid frame type %02X\n", frametype);
                return;
        }

        if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
                return;

        /*
         *      Space for Ethernet and 802.2 LLC headers.
         */
        skb_reserve(skb, X25_MAX_L2_LEN);

        /*
         *      Make space for the GFI and LCI, and fill them in.
         */
        dptr = skb_put(skb, 2);

        lci1 = (x25->lci >> 8) & 0x0F;
        lci2 = (x25->lci >> 0) & 0xFF;

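        /*
         * Layout of the two octets written below (for illustration):
         * the first octet carries the GFI bits, which select modulo-8
         * or modulo-128 sequencing, in its upper nibble and the top
         * four bits of the LCI in its lower nibble; the second octet
         * holds the low eight bits of the LCI.  For example, an LCI of
         * 0x123 gives lci1 = 0x01 and lci2 = 0x23, with X25_GFI_STDSEQ
         * or X25_GFI_EXTSEQ OR-ed into the first octet.
         */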
        if (x25->neighbour->extended) {
                *dptr++ = lci1 | X25_GFI_EXTSEQ;
                *dptr++ = lci2;
        } else {
                *dptr++ = lci1 | X25_GFI_STDSEQ;
                *dptr++ = lci2;
        }

        /*
         *      Now fill in the frame type specific information.
         */
        switch (frametype) {

                case X25_CALL_REQUEST:
                        dptr    = skb_put(skb, 1);
                        *dptr++ = X25_CALL_REQUEST;
                        len     = x25_addr_aton(addresses, &x25->dest_addr,
                                                &x25->source_addr);
                        skb_put_data(skb, addresses, len);
                        len     = x25_create_facilities(facilities,
                                        &x25->facilities,
                                        &x25->dte_facilities,
                                        x25->neighbour->global_facil_mask);
                        skb_put_data(skb, facilities, len);
                        skb_put_data(skb, x25->calluserdata.cuddata,
                                     x25->calluserdata.cudlength);
                        x25->calluserdata.cudlength = 0;
                        break;

                case X25_CALL_ACCEPTED:
                        dptr    = skb_put(skb, 2);
                        *dptr++ = X25_CALL_ACCEPTED;
                        *dptr++ = 0x00;         /* Address lengths */
                        len     = x25_create_facilities(facilities,
                                                        &x25->facilities,
                                                        &x25->dte_facilities,
                                                        x25->vc_facil_mask);
                        skb_put_data(skb, facilities, len);

                        /* fast select with no restriction on response
                                allows call user data. Userland must
                                ensure it is ours and not theirs */
                        if (x25->facilities.reverse & 0x80) {
                                skb_put_data(skb,
                                             x25->calluserdata.cuddata,
                                             x25->calluserdata.cudlength);
                        }
                        x25->calluserdata.cudlength = 0;
                        break;

                case X25_CLEAR_REQUEST:
                        dptr    = skb_put(skb, 3);
                        *dptr++ = frametype;
                        *dptr++ = x25->causediag.cause;
                        *dptr++ = x25->causediag.diagnostic;
                        break;

                case X25_RESET_REQUEST:
                        dptr    = skb_put(skb, 3);
                        *dptr++ = frametype;
                        *dptr++ = 0x00;         /* XXX */
                        *dptr++ = 0x00;         /* XXX */
                        break;

                case X25_RR:
                case X25_RNR:
                case X25_REJ:
                        if (x25->neighbour->extended) {
                                dptr     = skb_put(skb, 2);
                                *dptr++  = frametype;
                                *dptr++  = (x25->vr << 1) & 0xFE;
                        } else {
                                dptr     = skb_put(skb, 1);
                                *dptr    = frametype;
                                *dptr++ |= (x25->vr << 5) & 0xE0;
                        }
                        break;

                case X25_CLEAR_CONFIRMATION:
                case X25_INTERRUPT_CONFIRMATION:
                case X25_RESET_CONFIRMATION:
                        dptr  = skb_put(skb, 1);
                        *dptr = frametype;
                        break;
        }

        x25_transmit_link(skb, x25->neighbour);
}
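
/*
 * Illustrative sketch: for an X25_RR on a non-extended (modulo-8) link
 * with vr == 3, the routine above emits a three-octet packet body
 * after the reserved L2 headroom -- the two GFI/LCI octets followed by
 * one octet holding the RR type in its low bits with V(R) shifted into
 * bits 7-5; assuming the usual X25_RR value of 0x01, that last octet
 * would be 0x61.
 */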

/*
 *      Unpick the contents of the passed X.25 Packet Layer frame.
 */
int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
               int *d, int *m)
{
        struct x25_sock *x25 = x25_sk(sk);
        unsigned char *frame;

        if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
                return X25_ILLEGAL;
        frame = skb->data;

        *ns = *nr = *q = *d = *m = 0;

        switch (frame[2]) {
        case X25_CALL_REQUEST:
        case X25_CALL_ACCEPTED:
        case X25_CLEAR_REQUEST:
        case X25_CLEAR_CONFIRMATION:
        case X25_INTERRUPT:
        case X25_INTERRUPT_CONFIRMATION:
        case X25_RESET_REQUEST:
        case X25_RESET_CONFIRMATION:
        case X25_RESTART_REQUEST:
        case X25_RESTART_CONFIRMATION:
        case X25_REGISTRATION_REQUEST:
        case X25_REGISTRATION_CONFIRMATION:
        case X25_DIAGNOSTIC:
                return frame[2];
        }

        if (x25->neighbour->extended) {
                if (frame[2] == X25_RR  ||
                    frame[2] == X25_RNR ||
                    frame[2] == X25_REJ) {
                        if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
                                return X25_ILLEGAL;
                        frame = skb->data;

                        *nr = (frame[3] >> 1) & 0x7F;
                        return frame[2];
                }
        } else {
                if ((frame[2] & 0x1F) == X25_RR  ||
                    (frame[2] & 0x1F) == X25_RNR ||
                    (frame[2] & 0x1F) == X25_REJ) {
                        *nr = (frame[2] >> 5) & 0x07;
                        return frame[2] & 0x1F;
                }
        }

        if (x25->neighbour->extended) {
                if ((frame[2] & 0x01) == X25_DATA) {
                        if (!pskb_may_pull(skb, X25_EXT_MIN_LEN))
                                return X25_ILLEGAL;
                        frame = skb->data;

                        *q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
                        *d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
                        *m  = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT;
                        *nr = (frame[3] >> 1) & 0x7F;
                        *ns = (frame[2] >> 1) & 0x7F;
                        return X25_DATA;
                }
        } else {
                if ((frame[2] & 0x01) == X25_DATA) {
                        *q  = (frame[0] & X25_Q_BIT) == X25_Q_BIT;
                        *d  = (frame[0] & X25_D_BIT) == X25_D_BIT;
                        *m  = (frame[2] & X25_STD_M_BIT) == X25_STD_M_BIT;
                        *nr = (frame[2] >> 5) & 0x07;
                        *ns = (frame[2] >> 1) & 0x07;
                        return X25_DATA;
                }
        }

        pr_debug("invalid PLP frame %3ph\n", frame);

        return X25_ILLEGAL;
}
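
/*
 * Reference for the standard (modulo-8) data-packet case decoded
 * above -- the third octet packs its fields as:
 *
 *   bit 0    : 0 (data packet indicator)
 *   bits 3-1 : N(S)
 *   bit 4    : M bit (X25_STD_M_BIT)
 *   bits 7-5 : N(R)
 *
 * so, as an illustration, frame[2] == 0x7A decodes to *ns == 5,
 * *nr == 3 with the M bit set.
 */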

void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
                    unsigned char diagnostic)
{
        struct x25_sock *x25 = x25_sk(sk);

        x25_clear_queues(sk);
        x25_stop_timer(sk);

        x25->lci   = 0;
        x25->state = X25_STATE_0;

        x25->causediag.cause      = cause;
        x25->causediag.diagnostic = diagnostic;

        sk->sk_state     = TCP_CLOSE;
        sk->sk_err       = reason;
        sk->sk_shutdown |= SEND_SHUTDOWN;

        if (!sock_flag(sk, SOCK_DEAD)) {
                sk->sk_state_change(sk);
                sock_set_flag(sk, SOCK_DEAD);
        }
}

/*
 * Clear an own-rx-busy condition and tell the peer about this, provided
 * that there is a significant amount of free receive buffer space available.
 */
void x25_check_rbuf(struct sock *sk)
{
        struct x25_sock *x25 = x25_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
            (x25->condition & X25_COND_OWN_RX_BUSY)) {
                x25->condition &= ~X25_COND_OWN_RX_BUSY;
                x25->condition &= ~X25_COND_ACK_PENDING;
                x25->vl         = x25->vr;
                x25_write_internal(sk, X25_RR);
                x25_stop_timer(sk);
        }
}