// Source: main.p4 (626 lines), forked from opennetworkinglab/ngsdn-tutorial.
/*
* Copyright 2019-present Open Networking Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <core.p4>
#include <v1model.p4>
// CPU_PORT specifies the P4 port number associated to controller packet-in and
// packet-out. All packets forwarded via this port will be delivered to the
// controller as P4Runtime PacketIn messages. Similarly, PacketOut messages from
// the controller will be seen by the P4 pipeline as coming from the CPU_PORT.
#define CPU_PORT 255
// CPU_CLONE_SESSION_ID specifies the mirroring session for packets to be cloned
// to the CPU port. Packets associated with this session ID will be cloned to
// the CPU_PORT as well as being transmitted via their egress port (set by the
// bridging/routing/acl table). For cloning to work, the P4Runtime controller
// needs first to insert a CloneSessionEntry that maps this session ID to the
// CPU_PORT.
#define CPU_CLONE_SESSION_ID 99
// Maximum number of hops supported when using SRv6 (sizes the srv6_list
// header stack in parsed_headers_t). Required for Exercise 7.
#define SRV6_MAX_HOPS 4

// Type aliases used across headers, tables, and metadata.
typedef bit<9>   port_num_t;       // v1model/BMv2 port numbers are 9 bits wide.
typedef bit<48>  mac_addr_t;
typedef bit<16>  mcast_group_id_t; // ID of a PRE multicast group entry.
typedef bit<32>  ipv4_addr_t;
typedef bit<128> ipv6_addr_t;
typedef bit<16>  l4_port_t;        // TCP/UDP port number.

// EtherType values recognized by the parser.
const bit<16> ETHERTYPE_IPV4 = 0x0800;
const bit<16> ETHERTYPE_IPV6 = 0x86dd;

// IPv4 protocol / IPv6 next-header numbers recognized by the parser.
const bit<8> IP_PROTO_ICMP   = 1;
const bit<8> IP_PROTO_TCP    = 6;
const bit<8> IP_PROTO_UDP    = 17;
const bit<8> IP_PROTO_SRV6   = 43; // IPv6 Routing extension header (carries the SRH).
const bit<8> IP_PROTO_ICMPV6 = 58;

// Ethernet destination prefix 33:33:... is reserved for IPv6 multicast
// (RFC 2464); this constant is the all-nodes address 33:33:00:00:00:01.
const mac_addr_t IPV6_MCAST_01 = 0x33_33_00_00_00_01;

// ICMPv6 type values for NDP Neighbor Solicitation / Neighbor Advertisement.
const bit<8> ICMP6_TYPE_NS = 135;
const bit<8> ICMP6_TYPE_NA = 136;
// NDP option type: Target Link-Layer Address.
const bit<8> NDP_OPT_TARGET_LL_ADDR = 2;
// Flag bits carried in the first 32 bits of an NDP Neighbor Advertisement
// (Router / Solicited / Override).
const bit<32> NDP_FLAG_ROUTER = 0x80000000;
const bit<32> NDP_FLAG_SOLICITED = 0x40000000;
const bit<32> NDP_FLAG_OVERRIDE = 0x20000000;
//------------------------------------------------------------------------------
// HEADER DEFINITIONS
//------------------------------------------------------------------------------
// Standard Ethernet header.
header ethernet_t {
    mac_addr_t dst_addr;
    mac_addr_t src_addr;
    bit<16> ether_type;
}

// IPv4 header (fixed 20-byte part; options are not parsed by this program).
header ipv4_t {
    bit<4> version;
    bit<4> ihl;
    bit<6> dscp;
    bit<2> ecn;
    bit<16> total_len;
    bit<16> identification;
    bit<3> flags;
    bit<13> frag_offset;
    bit<8> ttl;
    bit<8> protocol;
    bit<16> hdr_checksum;
    bit<32> src_addr;
    bit<32> dst_addr;
}

// IPv6 header (fixed 40 bytes).
header ipv6_t {
    bit<4> version;
    bit<8> traffic_class;
    bit<20> flow_label;
    bit<16> payload_len;
    bit<8> next_hdr;
    bit<8> hop_limit;
    bit<128> src_addr;
    bit<128> dst_addr;
}

// SRv6 Segment Routing Header (SRH) base header; the segment list itself
// is parsed into the srv6_list header stack below.
header srv6h_t {
    bit<8> next_hdr;
    bit<8> hdr_ext_len;
    bit<8> routing_type;
    bit<8> segment_left;
    bit<8> last_entry;
    bit<8> flags;
    bit<16> tag;
}

// One entry of the SRH segment list: a 128-bit segment identifier (SID).
header srv6_list_t {
    bit<128> segment_id;
}

// TCP header (fixed part; options are not parsed).
header tcp_t {
    bit<16> src_port;
    bit<16> dst_port;
    bit<32> seq_no;
    bit<32> ack_no;
    bit<4> data_offset;
    bit<3> res;
    bit<3> ecn;
    bit<6> ctrl;
    bit<16> window;
    bit<16> checksum;
    bit<16> urgent_ptr;
}

// UDP header.
header udp_t {
    bit<16> src_port;
    bit<16> dst_port;
    bit<16> len;
    bit<16> checksum;
}

// ICMP (for IPv4) header, including echo identifier/sequence/timestamp
// fields as laid out by this program.
header icmp_t {
    bit<8> type;
    bit<8> icmp_code;
    bit<16> checksum;
    bit<16> identifier;
    bit<16> sequence_number;
    bit<64> timestamp;
}

// ICMPv6 common header; NDP-specific fields follow in ndp_t.
header icmpv6_t {
    bit<8> type;
    bit<8> code;
    bit<16> checksum;
}

// NDP NS/NA message body: flags + target address, followed by a single
// Target Link-Layer Address option (type/length/MAC).
header ndp_t {
    bit<32> flags;
    ipv6_addr_t target_ipv6_addr;
    // NDP option.
    bit<8> type;
    bit<8> length;
    bit<48> target_mac_addr;
}
// Packet-in header. Prepended to packets sent to the CPU_PORT and used by the
// P4Runtime server (Stratum) to populate the PacketIn message metadata fields.
// Here we use it to carry the original ingress port where the packet was
// received. The 7-bit pad aligns the header to a whole number of bytes
// (9 + 7 = 16 bits).
@controller_header("packet_in")
header cpu_in_header_t {
    port_num_t ingress_port;
    bit<7> _pad;
}

// Packet-out header. Prepended to packets received from the CPU_PORT. Fields of
// this header are populated by the P4Runtime server based on the P4Runtime
// PacketOut metadata fields. Here we use it to inform the P4 pipeline on which
// port this packet-out should be transmitted.
@controller_header("packet_out")
header cpu_out_header_t {
    port_num_t egress_port;
    bit<7> _pad;
}
// All headers this pipeline can parse/emit. Headers left invalid by the
// parser are skipped by the deparser.
struct parsed_headers_t {
    cpu_out_header_t cpu_out;
    cpu_in_header_t cpu_in;
    ethernet_t ethernet;
    ipv4_t ipv4;
    ipv6_t ipv6;
    srv6h_t srv6h;
    // SRH segment list, parsed as a header stack of up to SRV6_MAX_HOPS SIDs.
    srv6_list_t[SRV6_MAX_HOPS] srv6_list;
    tcp_t tcp;
    udp_t udp;
    icmp_t icmp;
    icmpv6_t icmpv6;
    ndp_t ndp;
}

// Per-packet metadata shared between parser and control blocks.
struct local_metadata_t {
    l4_port_t l4_src_port;       // Set by the parser from TCP or UDP.
    l4_port_t l4_dst_port;       // Set by the parser from TCP or UDP.
    bool is_multicast;           // Set by set_multicast_group; checked in egress.
    ipv6_addr_t next_srv6_sid;   // Next SID, set while parsing the SRH list.
    bit<8> ip_proto;             // IPv4 protocol or IPv6 next_hdr.
    bit<8> icmp_type;            // ICMP or ICMPv6 type (used by the ACL).
}
//------------------------------------------------------------------------------
// INGRESS PIPELINE
//------------------------------------------------------------------------------
// Parses Ethernet/IPv4/IPv6, the SRv6 routing header with its segment list,
// L4 headers (TCP/UDP), ICMP/ICMPv6, and NDP. Packets arriving on CPU_PORT
// are expected to carry a packet-out header before Ethernet.
parser ParserImpl (packet_in packet,
                   out parsed_headers_t hdr,
                   inout local_metadata_t local_metadata,
                   inout standard_metadata_t standard_metadata)
{
    state start {
        // Only controller packet-outs (ingress on CPU_PORT) carry cpu_out.
        transition select(standard_metadata.ingress_port) {
            CPU_PORT: parse_packet_out;
            default: parse_ethernet;
        }
    }
    state parse_packet_out {
        packet.extract(hdr.cpu_out);
        transition parse_ethernet;
    }
    state parse_ethernet {
        packet.extract(hdr.ethernet);
        transition select(hdr.ethernet.ether_type){
            ETHERTYPE_IPV4: parse_ipv4;
            ETHERTYPE_IPV6: parse_ipv6;
            default: accept;
        }
    }
    state parse_ipv4 {
        packet.extract(hdr.ipv4);
        // Record the protocol for ternary matching in the ACL table.
        local_metadata.ip_proto = hdr.ipv4.protocol;
        transition select(hdr.ipv4.protocol) {
            IP_PROTO_TCP: parse_tcp;
            IP_PROTO_UDP: parse_udp;
            IP_PROTO_ICMP: parse_icmp;
            default: accept;
        }
    }
    state parse_ipv6 {
        packet.extract(hdr.ipv6);
        local_metadata.ip_proto = hdr.ipv6.next_hdr;
        transition select(hdr.ipv6.next_hdr) {
            IP_PROTO_TCP: parse_tcp;
            IP_PROTO_UDP: parse_udp;
            IP_PROTO_ICMPV6: parse_icmpv6;
            IP_PROTO_SRV6: parse_srv6;
            default: accept;
        }
    }
    state parse_tcp {
        packet.extract(hdr.tcp);
        // Expose L4 ports in metadata so the ACL can match them regardless
        // of whether the packet is TCP or UDP.
        local_metadata.l4_src_port = hdr.tcp.src_port;
        local_metadata.l4_dst_port = hdr.tcp.dst_port;
        transition accept;
    }
    state parse_udp {
        packet.extract(hdr.udp);
        local_metadata.l4_src_port = hdr.udp.src_port;
        local_metadata.l4_dst_port = hdr.udp.dst_port;
        transition accept;
    }
    state parse_icmp {
        packet.extract(hdr.icmp);
        local_metadata.icmp_type = hdr.icmp.type;
        transition accept;
    }
    state parse_icmpv6 {
        packet.extract(hdr.icmpv6);
        local_metadata.icmp_type = hdr.icmpv6.type;
        // Only NDP NS/NA messages carry the ndp_t body parsed below.
        transition select(hdr.icmpv6.type) {
            ICMP6_TYPE_NS: parse_ndp;
            ICMP6_TYPE_NA: parse_ndp;
            default: accept;
        }
    }
    state parse_ndp {
        packet.extract(hdr.ndp);
        transition accept;
    }
    state parse_srv6 {
        packet.extract(hdr.srv6h);
        transition parse_srv6_list;
    }
    state parse_srv6_list {
        // Extract one SID per iteration into the header stack; hdr.srv6_list
        // .lastIndex is the index of the entry just extracted.
        packet.extract(hdr.srv6_list.next);
        // The "next" segment is the one at index segment_left - 1.
        // NOTE(review): if segment_left == 0 the bit<32> subtraction wraps,
        // so no entry compares equal and next_srv6_sid stays unset — confirm
        // the control plane never produces segment_left == 0 here.
        bool next_segment = (bit<32>)hdr.srv6h.segment_left - 1 == (bit<32>)hdr.srv6_list.lastIndex;
        transition select(next_segment) {
            true: mark_current_srv6;
            default: check_last_srv6;
        }
    }
    state mark_current_srv6 {
        // Remember the next SID so the ingress pipeline can route towards it.
        local_metadata.next_srv6_sid = hdr.srv6_list.last.segment_id;
        transition check_last_srv6;
    }
    state check_last_srv6 {
        // working with bit<8> and int<32> which cannot be cast directly; using
        // bit<32> as common intermediate type for comparision.
        // Loops back to parse_srv6_list until last_entry is reached; lists
        // longer than SRV6_MAX_HOPS overflow the stack and — presumably —
        // raise a parser error (StackOutOfBounds); verify on the target.
        bool last_segment = (bit<32>)hdr.srv6h.last_entry == (bit<32>)hdr.srv6_list.lastIndex;
        transition select(last_segment) {
           true: parse_srv6_next_hdr;
           false: parse_srv6_list;
        }
    }
    state parse_srv6_next_hdr {
        transition select(hdr.srv6h.next_hdr) {
            IP_PROTO_TCP: parse_tcp;
            IP_PROTO_UDP: parse_udp;
            IP_PROTO_ICMPV6: parse_icmpv6;
            default: accept;
        }
    }
}
// Ingress checksum verification — intentionally a no-op.
control VerifyChecksumImpl(inout parsed_headers_t hdr,
                           inout local_metadata_t meta)
{
    // Not used here. We assume all packets have valid checksum, if not, we let
    // the end hosts detect errors.
    apply { /* EMPTY */ }
}
// Main ingress pipeline: L2 bridging (exact + ternary tables), placeholders
// for NDP reply / IPv6 routing / SRv6 (tutorial exercises), and an ACL that
// can send/clone packets to the controller.
// Table and action names here are part of the P4Runtime (P4Info) interface:
// do not rename without updating the controller.
control IngressPipeImpl (inout parsed_headers_t    hdr,
                         inout local_metadata_t    local_metadata,
                         inout standard_metadata_t standard_metadata) {

    // Drop action shared by many tables.
    action drop() {
        mark_to_drop(standard_metadata);
    }

    // *** L2 BRIDGING
    //
    // Here we define tables to forward packets based on their Ethernet
    // destination address. There are two types of L2 entries that we
    // need to support:
    //
    // 1. Unicast entries: which will be filled in by the control plane when the
    //    location (port) of new hosts is learned.
    // 2. Broadcast/multicast entries: used replicate NDP Neighbor Solicitation
    //    (NS) messages to all host-facing ports;
    //
    // For (2), unlike ARP messages in IPv4 which are broadcasted to Ethernet
    // destination address FF:FF:FF:FF:FF:FF, NDP messages are sent to special
    // Ethernet addresses specified by RFC2464. These addresses are prefixed
    // with 33:33 and the last four octets are the last four octets of the IPv6
    // destination multicast address. The most straightforward way of matching
    // on such IPv6 broadcast/multicast packets, without digging in the details
    // of RFC2464, is to use a ternary match on 33:33:**:**:**:**, where * means
    // "don't care".
    //
    // For this reason, our solution defines two tables. One that matches in an
    // exact fashion (easier to scale on switch ASIC memory) and one that uses
    // ternary matching (which requires more expensive TCAM memories, usually
    // much smaller).

    // --- l2_exact_table (for unicast entries) --------------------------------

    // Forward out a single port chosen by the control plane.
    action set_egress_port(port_num_t port_num) {
        standard_metadata.egress_spec = port_num;
    }

    table l2_exact_table {
        key = {
            hdr.ethernet.dst_addr: exact;
        }
        actions = {
            set_egress_port;
            @defaultonly drop;
        }
        const default_action = drop;
        // The @name annotation is used here to provide a name to this table
        // counter, as it will be needed by the compiler to generate the
        // corresponding P4Info entity.
        @name("l2_exact_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    // --- l2_ternary_table (for broadcast/multicast entries) ------------------

    action set_multicast_group(mcast_group_id_t gid) {
        // gid will be used by the Packet Replication Engine (PRE) in the
        // Traffic Manager--located right after the ingress pipeline, to
        // replicate a packet to multiple egress ports, specified by the control
        // plane by means of P4Runtime MulticastGroupEntry messages.
        standard_metadata.mcast_grp = gid;
        local_metadata.is_multicast = true;
    }

    table l2_ternary_table {
        key = {
            hdr.ethernet.dst_addr: ternary;
        }
        actions = {
            set_multicast_group;
            @defaultonly drop;
        }
        const default_action = drop;
        @name("l2_ternary_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    // *** TODO EXERCISE 5 (IPV6 ROUTING)
    //
    // 1. Create a table to to handle NDP messages to resolve the MAC address of
    //    switch. This table should:
    //    - match on hdr.ndp.target_ipv6_addr (exact match)
    //    - provide action "ndp_ns_to_na" (look in snippets.p4)
    //    - default_action should be "NoAction"
    //
    // 2. Create table to handle IPv6 routing. Create a L2 my station table (hit
    //    when Ethernet destination address is the switch address). This table
    //    should not do anything to the packet (i.e., NoAction), but the control
    //    block below should use the result (table.hit) to decide how to process
    //    the packet.
    //
    // 3. Create a table for IPv6 routing. An action selector should be use to
    //    pick a next hop MAC address according to a hash of packet header
    //    fields (IPv6 source/destination address and the flow label). Look in
    //    snippets.p4 for an example of an action selector and table using it.
    //
    // You can name your tables whatever you like. You will need to fill
    // the name in elsewhere in this exercise.

    // *** TODO EXERCISE 6 (SRV6)
    //
    // Implement tables to provide SRV6 logic.

    // *** ACL
    //
    // Provides ways to override a previous forwarding decision, for example
    // requiring that a packet is cloned/sent to the CPU, or dropped.
    //
    // We use this table to clone all NDP packets to the control plane, so to
    // enable host discovery. When the location of a new host is discovered, the
    // controller is expected to update the L2 and L3 tables with the
    // corresponding bridging and routing entries.

    // Redirect the packet itself to the controller (no copy on the wire).
    action send_to_cpu() {
        standard_metadata.egress_spec = CPU_PORT;
    }

    action clone_to_cpu() {
        // Cloning is achieved by using a v1model-specific primitive. Here we
        // set the type of clone operation (ingress-to-egress pipeline), the
        // clone session ID (the CPU one), and the metadata fields we want to
        // preserve for the cloned packet replica.
        // NOTE(review): clone3 is deprecated in recent v1model in favor of
        // clone_preserving_field_list — fine for the BMv2 version this
        // tutorial targets; confirm before upgrading the compiler/target.
        clone3(CloneType.I2E, CPU_CLONE_SESSION_ID, { standard_metadata.ingress_port });
    }

    table acl_table {
        key = {
            standard_metadata.ingress_port: ternary;
            hdr.ethernet.dst_addr: ternary;
            hdr.ethernet.src_addr: ternary;
            hdr.ethernet.ether_type: ternary;
            local_metadata.ip_proto: ternary;
            local_metadata.icmp_type: ternary;
            local_metadata.l4_src_port: ternary;
            local_metadata.l4_dst_port: ternary;
        }
        actions = {
            send_to_cpu;
            clone_to_cpu;
            drop;
        }
        @name("acl_table_counter")
        counters = direct_counter(CounterType.packets_and_bytes);
    }

    apply {
        if (hdr.cpu_out.isValid()) {
            // *** TODO EXERCISE 4
            // Implement logic such that if this is a packet-out from the
            // controller:
            // 1. Set the packet egress port to that found in the cpu_out header
            // 2. Remove (set invalid) the cpu_out header
            // 3. Exit the pipeline here (no need to go through other tables
        }

        bool do_l3_l2 = true;

        if (hdr.icmpv6.isValid() && hdr.icmpv6.type == ICMP6_TYPE_NS) {
            // *** TODO EXERCISE 5
            // Insert logic to handle NDP messages to resolve the MAC address of the
            // switch. You should apply the NDP reply table created before.
            // If this is an NDP NS packet, i.e., if a matching entry is found,
            // unset the "do_l3_l2" flag to skip the L3 and L2 tables, as the
            // "ndp_ns_to_na" action already set an egress port.
        }

        if (do_l3_l2) {
            // *** TODO EXERCISE 5
            // Insert logic to match the My Station table and upon hit, the
            // routing table. You should also add a conditional to drop the
            // packet if the hop_limit reaches 0.

            // *** TODO EXERCISE 6
            // Insert logic to match the SRv6 My SID and Transit tables as well
            // as logic to perform PSP behavior. HINT: This logic belongs
            // somewhere between checking the switch's my station table and
            // applying the routing table.

            // L2 bridging logic. Apply the exact table first...
            if (!l2_exact_table.apply().hit) {
                // ...if an entry is NOT found, apply the ternary one in case
                // this is a multicast/broadcast NDP NS packet.
                l2_ternary_table.apply();
            }
        }

        // Lastly, apply the ACL table.
        acl_table.apply();
    }
}
// Egress pipeline: adds the packet-in header for CPU-bound packets
// (Exercise 4 TODO) and prunes multicast replicas on the ingress port.
control EgressPipeImpl (inout parsed_headers_t hdr,
                        inout local_metadata_t local_metadata,
                        inout standard_metadata_t standard_metadata) {
    apply {
        if (standard_metadata.egress_port == CPU_PORT) {
            // *** TODO EXERCISE 4
            // Implement logic such that if the packet is to be forwarded to the
            // CPU port, e.g., if in ingress we matched on the ACL table with
            // action send/clone_to_cpu...
            // 1. Set cpu_in header as valid
            // 2. Set the cpu_in.ingress_port field to the original packet's
            //    ingress port (standard_metadata.ingress_port).
        }

        // If this is a multicast packet (flag set by l2_ternary_table), make
        // sure we are not replicating the packet on the same port where it was
        // received. This is useful to avoid broadcasting NDP requests on the
        // ingress port.
        if (local_metadata.is_multicast == true &&
              standard_metadata.ingress_port == standard_metadata.egress_port) {
            mark_to_drop(standard_metadata);
        }
    }
}
// Recomputes the ICMPv6 checksum for NDP packets the switch itself rewrites.
control ComputeChecksumImpl(inout parsed_headers_t hdr,
                            inout local_metadata_t local_metadata)
{
    apply {
        // The following is used to update the ICMPv6 checksum of NDP
        // NA packets generated by the ndp reply table in the ingress pipeline.
        // This function is executed only if the NDP header is present.
        // The field list is the IPv6 pseudo-header (src, dst, upper-layer
        // length, zero byte, next header) followed by the ICMPv6/NDP fields;
        // the list order must match the wire layout and must not be changed.
        update_checksum(hdr.ndp.isValid(),
            {
                hdr.ipv6.src_addr,
                hdr.ipv6.dst_addr,
                hdr.ipv6.payload_len,
                8w0,
                hdr.ipv6.next_hdr,
                hdr.icmpv6.type,
                hdr.icmpv6.code,
                hdr.ndp.flags,
                hdr.ndp.target_ipv6_addr,
                hdr.ndp.type,
                hdr.ndp.length,
                hdr.ndp.target_mac_addr
            },
            hdr.icmpv6.checksum,
            HashAlgorithm.csum16
        );
    }
}
// Deparser: emit order defines the wire layout; headers left invalid are
// skipped. hdr.cpu_out is deliberately not emitted — the packet-out header
// is only meaningful between controller and pipeline, never on the wire.
control DeparserImpl(packet_out packet, in parsed_headers_t hdr) {
    apply {
        // cpu_in is emitted first so the P4Runtime server can read the
        // packet-in metadata for CPU-bound packets.
        packet.emit(hdr.cpu_in);
        packet.emit(hdr.ethernet);
        packet.emit(hdr.ipv4);
        packet.emit(hdr.ipv6);
        packet.emit(hdr.srv6h);
        packet.emit(hdr.srv6_list);
        packet.emit(hdr.tcp);
        packet.emit(hdr.udp);
        packet.emit(hdr.icmp);
        packet.emit(hdr.icmpv6);
        packet.emit(hdr.ndp);
    }
}
// v1model pipeline instantiation: parser, checksum verification, ingress,
// egress, checksum computation, deparser.
V1Switch(
    ParserImpl(),
    VerifyChecksumImpl(),
    IngressPipeImpl(),
    EgressPipeImpl(),
    ComputeChecksumImpl(),
    DeparserImpl()
) main;