AT_BANNER([system-ovn-kmod])
OVN_FOR_EACH_NORTHD([
AT_SETUP([load balancing affinity sessions - IPv4])
AT_KEYWORDS([ovnlb])
CHECK_CONNTRACK()
CHECK_CONNTRACK_NAT()
ovn_start
OVS_TRAFFIC_VSWITCHD_START()
ADD_BR([br-int])
# Set external-ids in br-int needed for ovn-controller
ovs-vsctl \
-- set Open_vSwitch . external-ids:system-id=hv1 \
-- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
-- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
-- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
-- set bridge br-int fail-mode=secure other-config:disable-in-band=true
# Start ovn-controller
start_daemon ovn-controller
# Logical network:
# Two LRs - R1 and R2 that are connected to each other via LS "join"
# in 20.0.0.0/24 network. R1 has switches foo (192.168.1.0/24) and
# bar (192.168.2.0/24) connected to it. R2 has alice (172.16.1.0/24) connected
# to it. R2 is a gateway router on which we add load-balancing rules.
#
# foo -- R1 -- join - R2 -- alice
#        |
# bar ----
ovn-nbctl create Logical_Router name=R1
ovn-nbctl create Logical_Router name=R2 options:chassis=hv1
ovn-nbctl ls-add foo
ovn-nbctl ls-add bar
ovn-nbctl ls-add alice
ovn-nbctl ls-add join
# Connect foo to R1
ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 192.168.1.1/24
ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \
type=router options:router-port=foo addresses=\"00:00:01:01:02:03\"
# Connect bar to R1
ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 192.168.2.1/24
ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \
type=router options:router-port=bar addresses=\"00:00:01:01:02:04\"
# Connect alice to R2
ovn-nbctl lrp-add R2 alice 00:00:02:01:02:03 172.16.1.1/24
ovn-nbctl lsp-add alice rp-alice -- set Logical_Switch_Port rp-alice \
type=router options:router-port=alice addresses=\"00:00:02:01:02:03\"
# Connect R1 to join
ovn-nbctl lrp-add R1 R1_join 00:00:04:01:02:03 20.0.0.1/24
ovn-nbctl lsp-add join r1-join -- set Logical_Switch_Port r1-join \
type=router options:router-port=R1_join addresses='"00:00:04:01:02:03"'
# Connect R2 to join
ovn-nbctl lrp-add R2 R2_join 00:00:04:01:02:04 20.0.0.2/24
ovn-nbctl lsp-add join r2-join -- set Logical_Switch_Port r2-join \
type=router options:router-port=R2_join addresses='"00:00:04:01:02:04"'
# Static routes.
ovn-nbctl lr-route-add R1 172.16.1.0/24 20.0.0.2
ovn-nbctl lr-route-add R2 192.168.0.0/16 20.0.0.1
# Logical port 'foo1' in switch 'foo'.
ADD_NAMESPACES(foo1)
ADD_VETH(foo1, foo1, br-int, "192.168.1.2/24", "f0:00:00:01:02:03", \
"192.168.1.1")
ovn-nbctl lsp-add foo foo1 \
-- lsp-set-addresses foo1 "f0:00:00:01:02:03 192.168.1.2"
# Logical port 'alice1' in switch 'alice'.
ADD_NAMESPACES(alice1)
ADD_VETH(alice1, alice1, br-int, "172.16.1.2/24", "f0:00:00:01:02:04", \
"172.16.1.1")
ovn-nbctl lsp-add alice alice1 \
-- lsp-set-addresses alice1 "f0:00:00:01:02:04 172.16.1.2"
# Logical port 'bar1' in switch 'bar'.
ADD_NAMESPACES(bar1)
ADD_VETH(bar1, bar1, br-int, "192.168.2.2/24", "f0:00:00:01:02:05", \
"192.168.2.1")
ovn-nbctl lsp-add bar bar1 \
-- lsp-set-addresses bar1 "f0:00:00:01:02:05 192.168.2.2"
ADD_NAMESPACES(bar2)
ADD_VETH(bar2, bar2, br-int, "192.168.2.3/24", "e0:00:00:01:02:05", \
"192.168.2.1")
ovn-nbctl lsp-add bar bar2 \
-- lsp-set-addresses bar2 "e0:00:00:01:02:05 192.168.2.3"
# Config OVN load-balancer with a VIP.
ovn-nbctl lb-add lb0 172.16.1.100:8080 192.168.1.2:80,192.168.2.2:80
ovn-nbctl lb-add lb10 172.16.1.110:8080 192.168.1.2:80,192.168.2.2:80
ovn-nbctl lb-add lb0-no-aff 172.16.1.100:8081 192.168.1.2:80,192.168.2.2:80
ovn-nbctl lb-add lb10-no-aff 172.16.1.110:8081 192.168.1.2:80,192.168.2.2:80
ovn-nbctl lr-lb-add R2 lb0
ovn-nbctl lr-lb-add R2 lb10
ovn-nbctl lr-lb-add R2 lb0-no-aff
ovn-nbctl lr-lb-add R2 lb10-no-aff
# Start webservers in 'foo1', 'bar1'.
NETNS_DAEMONIZE([foo1], [nc -l -k 192.168.1.2 80], [nc-foo1.pid])
NETNS_DAEMONIZE([bar1], [nc -l -k 192.168.2.2 80], [nc-bar1.pid])
# Wait for ovn-controller to catch up.
ovn-nbctl --wait=hv sync
OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-groups br-int | \
grep 'nat(dst=192.168.2.2:80)'])
dnl Should work with the virtual IP address through NAT
OVS_WAIT_FOR_OUTPUT([
for i in $(seq 1 5); do
NS_EXEC([alice1], [nc -z 172.16.1.100 8080])
done
dnl Each server should have at least one connection.
ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.100) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
# Enable lb affinity
ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60
ovn-nbctl --wait=sb set load_balancer lb10 options:affinity_timeout=60
for i in $(seq 1 15); do
echo Request $i
NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8080])
done
dnl here we should have just one entry in the ct table
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.100) |
sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=192.168.[[0-9]].2/src=192.168.<cleared>.2/'], [0], [dnl
tcp,orig=(src=172.16.1.2,dst=172.16.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.<cleared>.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
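# The learned affinity flow in table 78 should use the configured 60s
# affinity_timeout as its idle_timeout and carry the selected backend
# (IP in REG4, port in REG8); dp_key is R2's datapath tunnel key, used
# to match the metadata field below.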
dp_key=$(printf "0x%x" $(fetch_column datapath tunnel_key external_ids:name=R2))
AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=78 --no-stats | strip_cookie | sed -e 's/load:0xc0a80[[0-9]]02/load:0xc0a80<cleared>02/'], [0], [dnl
table=78, idle_timeout=60, tcp,metadata=$dp_key,nw_src=172.16.1.2,nw_dst=172.16.1.100,tp_dst=8080 actions=load:0x1->NXM_NX_REG10[[14]],load:0xc0a80<cleared>02->NXM_NX_REG4[[]],load:0x50->NXM_NX_REG8[[0..15]]
])
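# check_affinity_flows: with affinity enabled every request should stick to
# the same backend, so exactly one of the two per-backend flows in table 15
# (matched on reg4, the chosen backend IP) should have a nonzero packet count.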
check_affinity_flows () {
n1=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ip,reg4=0xc0a80102,.*nw_dst=172.16.1.100/{print substr($4,11,length($4)-11)}')
n2=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ip,reg4=0xc0a80202,.*nw_dst=172.16.1.100/{print substr($4,11,length($4)-11)}')
[[ $n1 -gt 0 -a $n2 -eq 0 ]] || [[ $n1 -eq 0 -a $n2 -gt 0 ]]
echo $?
}
AT_CHECK([test $(check_affinity_flows) -eq 0])
NS_CHECK_EXEC([alice1], [nc -z 172.16.1.100 8081])
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
ovn-nbctl lb-add lb1 172.16.1.101:8080 192.168.1.2:80,192.168.2.2:80
ovn-nbctl lb-add lb11 172.16.1.111:8080 192.168.1.2:80,192.168.2.2:80
ovn-nbctl lb-add lb1-no-aff 172.16.1.101:8081 192.168.1.2:80,192.168.2.2:80
ovn-nbctl lb-add lb11-no-aff 172.16.1.111:8081 192.168.1.2:80,192.168.2.2:80
# Enable lb affinity
ovn-nbctl --wait=sb set load_balancer lb1 options:affinity_timeout=3
ovn-nbctl --wait=sb set load_balancer lb11 options:affinity_timeout=3
ovn-nbctl lr-lb-add R2 lb1
ovn-nbctl lr-lb-add R2 lb11
ovn-nbctl lr-lb-add R2 lb1-no-aff
ovn-nbctl lr-lb-add R2 lb11-no-aff
# check we use both backends
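# Deleting the learned flows in table 78 after each request drops the
# affinity state, so new connections are free to pick a different backend.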
OVS_WAIT_FOR_OUTPUT([
for i in $(seq 1 5); do
NS_EXEC([alice1], [nc -z 172.16.1.101 8080])
ovs-ofctl del-flows br-int table=78
done
dnl Each server should have at least one connection.
ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.1.101) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=172.16.1.2,dst=172.16.1.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=172.16.1.2,dst=172.16.1.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=172.16.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([alice1], [nc -z 172.16.1.101 8081])
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
NETNS_DAEMONIZE([bar2], [nc -l -k 192.168.2.3 80], [nc-bar2.pid])
ovn-nbctl lb-add lb2 192.168.2.100:8080 192.168.2.2:80,192.168.2.3:80
ovn-nbctl lb-add lb20 192.168.2.120:8080 192.168.2.2:80,192.168.2.3:80
ovn-nbctl lb-add lb2-no-aff 192.168.2.100:8081 192.168.2.2:80,192.168.2.3:80
ovn-nbctl lb-add lb20-no-aff 192.168.2.120:8081 192.168.2.2:80,192.168.2.3:80
ovn-nbctl --wait=sb set load_balancer lb2 options:affinity_timeout=60
ovn-nbctl --wait=sb set load_balancer lb20 options:affinity_timeout=60
ovn-nbctl ls-lb-add foo lb2
ovn-nbctl ls-lb-add foo lb20
ovn-nbctl ls-lb-add foo lb2-no-aff
ovn-nbctl ls-lb-add foo lb20-no-aff
for i in $(seq 1 15); do
echo Request $i
NS_CHECK_EXEC([foo1], [nc -z 192.168.2.100 8080])
done
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.2.100) |
sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=192.168.2.[[0-9]]/src=192.168.2.<cleared>/'], [0], [dnl
tcp,orig=(src=192.168.1.2,dst=192.168.2.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.<cleared>,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([foo1], [nc -z 192.168.2.100 8081])
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
ovn-nbctl lb-add lb3 192.168.2.101:8080 192.168.2.2:80,192.168.2.3:80
ovn-nbctl lb-add lb30 192.168.2.131:8080 192.168.2.2:80,192.168.2.3:80
ovn-nbctl lb-add lb3-no-aff 192.168.2.101:8081 192.168.2.2:80,192.168.2.3:80
ovn-nbctl lb-add lb30-no-aff 192.168.2.131:8081 192.168.2.2:80,192.168.2.3:80
ovn-nbctl --wait=sb set load_balancer lb3 options:affinity_timeout=3
ovn-nbctl --wait=sb set load_balancer lb30 options:affinity_timeout=3
ovn-nbctl ls-lb-add foo lb3
ovn-nbctl ls-lb-add foo lb30
ovn-nbctl ls-lb-add foo lb3-no-aff
ovn-nbctl ls-lb-add foo lb30-no-aff
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
OVS_WAIT_FOR_OUTPUT([
for i in $(seq 1 5); do
NS_EXEC([foo1], [nc -z 192.168.2.101 8080])
ovs-ofctl del-flows br-int table=78
done
dnl Each server should have at least one connection.
ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.2.101) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=192.168.1.2,dst=192.168.2.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=192.168.1.2,dst=192.168.2.101,sport=<cleared>,dport=<cleared>),reply=(src=192.168.2.3,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([foo1], [nc -z 192.168.2.101 8081])
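# lb4/lb40 use hairpin_snat_ip, so hairpinned traffic from foo1 to its own
# backend is SNATed to 192.168.1.200; the static neighbor entry below lets
# foo1 reply to that address.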
NS_CHECK_EXEC([foo1], [ip neigh add 192.168.1.200 lladdr 00:00:01:01:02:03 dev foo1], [0])
ovn-nbctl lb-add lb4 192.168.1.100:8080 192.168.1.2:80
ovn-nbctl lb-add lb40 192.168.1.140:8080 192.168.1.2:80
ovn-nbctl lb-add lb4-no-aff 192.168.1.100:8081 192.168.1.2:80
ovn-nbctl lb-add lb40-no-aff 192.168.1.140:8081 192.168.1.2:80
ovn-nbctl --wait=sb set load_balancer lb4 options:affinity_timeout=60 options:hairpin_snat_ip=192.168.1.200
ovn-nbctl --wait=sb set load_balancer lb40 options:affinity_timeout=60 options:hairpin_snat_ip=192.168.1.200
ovn-nbctl ls-lb-add foo lb4
ovn-nbctl ls-lb-add foo lb40
ovn-nbctl lr-lb-add R1 lb4
ovn-nbctl lr-lb-add R1 lb40
ovn-nbctl ls-lb-add foo lb4-no-aff
ovn-nbctl ls-lb-add foo lb40-no-aff
ovn-nbctl lr-lb-add R1 lb4-no-aff
ovn-nbctl lr-lb-add R1 lb40-no-aff
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
for i in $(seq 1 15); do
echo Request $i
NS_CHECK_EXEC([foo1], [nc -z 192.168.1.100 8080])
done
dnl Each server should have at least one connection.
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(192.168.1.2) |
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=192.168.1.2,dst=192.168.1.100,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=192.168.1.2,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=192.168.1.200,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
tcp,orig=(src=192.168.1.200,dst=192.168.1.2,sport=<cleared>,dport=<cleared>),reply=(src=192.168.1.2,dst=192.168.1.200,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([foo1], [nc -z 192.168.1.100 8081])
OVS_APP_EXIT_AND_WAIT([ovn-controller])
as ovn-sb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
OVS_APP_EXIT_AND_WAIT([ovn-northd])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
/connection dropped.*/d
/inactivity probe*/d"])
AT_CLEANUP
])
OVN_FOR_EACH_NORTHD([
AT_SETUP([load balancing affinity sessions - IPv6])
AT_KEYWORDS([ovnlb])
CHECK_CONNTRACK()
CHECK_CONNTRACK_NAT()
ovn_start
OVS_TRAFFIC_VSWITCHD_START()
ADD_BR([br-int])
# Set external-ids in br-int needed for ovn-controller
ovs-vsctl \
-- set Open_vSwitch . external-ids:system-id=hv1 \
-- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
-- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
-- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
-- set bridge br-int fail-mode=secure other-config:disable-in-band=true
# Start ovn-controller
start_daemon ovn-controller
# Logical network:
# Two LRs - R1 and R2 that are connected to each other via LS "join"
# in fd20::/64 network. R1 has switches foo (fd11::/64) and
# bar (fd12::/64) connected to it. R2 has alice (fd72::/64) connected
# to it. R2 is a gateway router on which we add load-balancing rules.
#
# foo -- R1 -- join - R2 -- alice
#        |
# bar ----
ovn-nbctl create Logical_Router name=R1
ovn-nbctl create Logical_Router name=R2 options:chassis=hv1
ovn-nbctl ls-add foo
ovn-nbctl ls-add bar
ovn-nbctl ls-add alice
ovn-nbctl ls-add join
# Connect foo to R1
ovn-nbctl lrp-add R1 foo 00:00:01:01:02:03 fd11::1/64
ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo \
type=router options:router-port=foo addresses=\"00:00:01:01:02:03\"
# Connect bar to R1
ovn-nbctl lrp-add R1 bar 00:00:01:01:02:04 fd12::1/64
ovn-nbctl lsp-add bar rp-bar -- set Logical_Switch_Port rp-bar \
type=router options:router-port=bar addresses=\"00:00:01:01:02:04\"
# Connect alice to R2
ovn-nbctl lrp-add R2 alice 00:00:02:01:02:03 fd72::1/64
ovn-nbctl lsp-add alice rp-alice -- set Logical_Switch_Port rp-alice \
type=router options:router-port=alice addresses=\"00:00:02:01:02:03\"
# Connect R1 to join
ovn-nbctl lrp-add R1 R1_join 00:00:04:01:02:03 fd20::1/64
ovn-nbctl lsp-add join r1-join -- set Logical_Switch_Port r1-join \
type=router options:router-port=R1_join addresses='"00:00:04:01:02:03"'
# Connect R2 to join
ovn-nbctl lrp-add R2 R2_join 00:00:04:01:02:04 fd20::2/64
ovn-nbctl lsp-add join r2-join -- set Logical_Switch_Port r2-join \
type=router options:router-port=R2_join addresses='"00:00:04:01:02:04"'
# Static routes.
ovn-nbctl lr-route-add R1 fd72::/64 fd20::2
ovn-nbctl lr-route-add R2 fd11::/64 fd20::1
ovn-nbctl lr-route-add R2 fd12::/64 fd20::1
# Logical port 'foo1' in switch 'foo'.
ADD_NAMESPACES(foo1)
ADD_VETH(foo1, foo1, br-int, "fd11::2/64", "f0:00:00:01:02:03", \
"fd11::1", "nodad")
ovn-nbctl lsp-add foo foo1 \
-- lsp-set-addresses foo1 "f0:00:00:01:02:03 fd11::2"
# Logical port 'alice1' in switch 'alice'.
ADD_NAMESPACES(alice1)
ADD_VETH(alice1, alice1, br-int, "fd72::2/64", "f0:00:00:01:02:04", \
"fd72::1", "nodad")
ovn-nbctl lsp-add alice alice1 \
-- lsp-set-addresses alice1 "f0:00:00:01:02:04 fd72::2"
# Logical port 'bar1' in switch 'bar'.
ADD_NAMESPACES(bar1)
ADD_VETH(bar1, bar1, br-int, "fd12::2/64", "f0:00:00:01:02:05", \
"fd12::1", "nodad")
ovn-nbctl lsp-add bar bar1 \
-- lsp-set-addresses bar1 "f0:00:00:01:02:05 fd12::2"
ADD_NAMESPACES(bar2)
ADD_VETH(bar2, bar2, br-int, "fd12::3/64", "e0:00:00:01:02:05", \
"fd12::1", "nodad")
ovn-nbctl lsp-add bar bar2 \
-- lsp-set-addresses bar2 "e0:00:00:01:02:05 fd12::3"
ovn-nbctl lb-add lb0 [[fd30::1]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
ovn-nbctl lb-add lb10 [[fd30::10]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
ovn-nbctl lb-add lb0-no-aff [[fd30::1]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
ovn-nbctl lb-add lb10-no-aff [[fd30::10]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
ovn-nbctl lr-lb-add R2 lb0
ovn-nbctl lr-lb-add R2 lb10
ovn-nbctl lr-lb-add R2 lb0-no-aff
ovn-nbctl lr-lb-add R2 lb10-no-aff
# Wait for ovn-controller to catch up.
ovn-nbctl --wait=hv sync
OVS_WAIT_UNTIL([ovs-ofctl -O OpenFlow13 dump-groups br-int | \
grep 'nat(dst=\[[fd11::2\]]:80)'])
# Start webservers in 'foo1', 'bar1'.
NETNS_DAEMONIZE([foo1], [nc -l -k fd11::2 80], [nc-foo1.pid])
NETNS_DAEMONIZE([bar1], [nc -l -k fd12::2 80], [nc-bar1.pid])
dnl Should work with the virtual IP address through NAT
OVS_WAIT_FOR_OUTPUT([
for i in $(seq 1 5); do
NS_EXEC([alice1], [nc -z fd30::1 8080])
done
dnl Each server should have at least one connection.
ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=fd72::2,dst=fd30::1,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=fd72::2,dst=fd30::1,sport=<cleared>,dport=<cleared>),reply=(src=fd12::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([alice1], [nc -z fd30::1 8081])
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
# Enable lb affinity
ovn-nbctl --wait=sb set load_balancer lb0 options:affinity_timeout=60
ovn-nbctl --wait=sb set load_balancer lb10 options:affinity_timeout=60
for i in $(seq 1 15); do
echo Request $i
NS_CHECK_EXEC([alice1], [nc -z fd30::1 8080])
done
dnl here we should have just one entry in the ct table
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::1) | grep -v fe80 |
sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=fd1[[0-9]]::2/src=fd1<cleared>::2/'], [0], [dnl
tcp,orig=(src=fd72::2,dst=fd30::1,sport=<cleared>,dport=<cleared>),reply=(src=fd1<cleared>::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
dp_key=$(printf "0x%x" $(fetch_column datapath tunnel_key external_ids:name=R2))
AT_CHECK_UNQUOTED([ovs-ofctl dump-flows br-int table=78 --no-stats | strip_cookie | sed -e 's/load:0xfd1[[0-9]]000000000000/load:0xfd1<cleared>000000000000/'], [0], [dnl
table=78, idle_timeout=60, tcp6,metadata=$dp_key,ipv6_src=fd72::2,ipv6_dst=fd30::1,tp_dst=8080 actions=load:0x1->NXM_NX_REG10[[14]],load:0x2->NXM_NX_XXREG1[[0..63]],load:0xfd1<cleared>000000000000->NXM_NX_XXREG1[[64..127]],load:0x50->NXM_NX_REG8[[0..15]]
])
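# As in the IPv4 test: exactly one of the two per-backend flows in table 15
# should have accumulated packets while affinity is in effect.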
check_affinity_flows () {
n1=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ipv6,reg4=0xfd110000,.*ipv6_dst=fd30::1\s/{print substr($4,11,length($4)-11)}')
n2=$(ovs-ofctl dump-flows br-int table=15 |awk '/priority=150,ct_state=\+new\+trk,ipv6,reg4=0xfd120000,.*ipv6_dst=fd30::1\s/{print substr($4,11,length($4)-11)}')
[[ $n1 -gt 0 -a $n2 -eq 0 ]] || [[ $n1 -eq 0 -a $n2 -gt 0 ]]
echo $?
}
AT_CHECK([test $(check_affinity_flows) -eq 0])
NS_CHECK_EXEC([alice1], [nc -z fd30::1 8081])
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
ovn-nbctl lb-add lb1 [[fd30::2]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
ovn-nbctl lb-add lb11 [[fd30::12]]:8080 [[fd11::2]]:80,[[fd12::2]]:80
ovn-nbctl lb-add lb1-no-aff [[fd30::2]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
ovn-nbctl lb-add lb11-no-aff [[fd30::12]]:8081 [[fd11::2]]:80,[[fd12::2]]:80
# Enable lb affinity
ovn-nbctl --wait=sb set load_balancer lb1 options:affinity_timeout=3
ovn-nbctl --wait=sb set load_balancer lb11 options:affinity_timeout=3
ovn-nbctl lr-lb-add R2 lb1
ovn-nbctl lr-lb-add R2 lb11
ovn-nbctl lr-lb-add R2 lb1-no-aff
ovn-nbctl lr-lb-add R2 lb11-no-aff
# check we use both backends
OVS_WAIT_FOR_OUTPUT([
for i in $(seq 1 5); do
NS_EXEC([alice1], [nc -z fd30::2 8080])
ovs-ofctl del-flows br-int table=78
done
dnl Each server should have at least one connection.
ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd30::2) | grep -v fe80 | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=fd72::2,dst=fd30::2,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=fd72::2,dst=fd30::2,sport=<cleared>,dport=<cleared>),reply=(src=fd12::2,dst=fd72::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([alice1], [nc -z fd30::2 8081])
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
NETNS_DAEMONIZE([bar2], [nc -l -k fd12::3 80], [nc-bar2.pid])
ovn-nbctl lb-add lb2 [[fd12::a]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl lb-add lb20 [[fd12::2a]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl lb-add lb2-no-aff [[fd12::a]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl lb-add lb20-no-aff [[fd12::2a]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl --wait=sb set load_balancer lb2 options:affinity_timeout=60
ovn-nbctl --wait=sb set load_balancer lb20 options:affinity_timeout=60
ovn-nbctl ls-lb-add foo lb2
ovn-nbctl ls-lb-add foo lb20
ovn-nbctl ls-lb-add foo lb2-no-aff
ovn-nbctl ls-lb-add foo lb20-no-aff
for i in $(seq 1 15); do
echo Request $i
NS_CHECK_EXEC([foo1], [nc -z fd12::a 8080])
done
dnl here we should have just one entry in the ct table
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd12::a) | grep -v fe80 |
sed -e 's/zone=[[0-9]]*/zone=<cleared>/; s/src=fd12::[[0-9]]/src=fd12::<cleared>/'], [0], [dnl
tcp,orig=(src=fd11::2,dst=fd12::a,sport=<cleared>,dport=<cleared>),reply=(src=fd12::<cleared>,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([foo1], [nc -z fd12::a 8081])
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
ovn-nbctl lb-add lb3 [[fd12::b]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl lb-add lb30 [[fd12::3b]]:8080 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl lb-add lb3-no-aff [[fd12::b]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl lb-add lb30-no-aff [[fd12::3b]]:8081 [[fd12::2]]:80,[[fd12::3]]:80
ovn-nbctl --wait=sb set load_balancer lb3 options:affinity_timeout=3
ovn-nbctl --wait=sb set load_balancer lb30 options:affinity_timeout=3
ovn-nbctl ls-lb-add foo lb3
ovn-nbctl ls-lb-add foo lb30
ovn-nbctl ls-lb-add foo lb3-no-aff
ovn-nbctl ls-lb-add foo lb30-no-aff
OVS_WAIT_FOR_OUTPUT([
for i in $(seq 1 5); do
NS_EXEC([foo1], [nc -z fd12::b 8080])
ovs-ofctl del-flows br-int table=78
done
ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd12::b) | grep -v fe80 | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=fd11::2,dst=fd12::b,sport=<cleared>,dport=<cleared>),reply=(src=fd12::2,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=fd11::2,dst=fd12::b,sport=<cleared>,dport=<cleared>),reply=(src=fd12::3,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([foo1], [nc -z fd12::b 8081])
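# Same hairpin check as in the IPv4 test: hairpin_snat_ip is set to fd11::b
# and foo1 gets a static neighbor entry for that address.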
NS_CHECK_EXEC([foo1], [ip -6 neigh add fd11::b lladdr 00:00:01:01:02:03 dev foo1], [0])
ovn-nbctl --wait=sb lb-add lb4 [[fd11::a]]:8080 [[fd11::2]]:80
ovn-nbctl --wait=sb lb-add lb40 [[fd11::a]]:8080 [[fd11::2]]:80
ovn-nbctl --wait=sb lb-add lb4-no-aff [[fd11::a]]:8081 [[fd11::2]]:80
ovn-nbctl --wait=sb lb-add lb40-no-aff [[fd11::a]]:8081 [[fd11::2]]:80
ovn-nbctl --wait=sb set load_balancer lb4 options:affinity_timeout=60 options:hairpin_snat_ip="fd11::b"
ovn-nbctl --wait=sb set load_balancer lb40 options:affinity_timeout=60 options:hairpin_snat_ip="fd11::b"
ovn-nbctl ls-lb-add foo lb4
ovn-nbctl ls-lb-add foo lb40
ovn-nbctl lr-lb-add R1 lb4
ovn-nbctl lr-lb-add R1 lb40
ovn-nbctl ls-lb-add foo lb4-no-aff
ovn-nbctl ls-lb-add foo lb40-no-aff
ovn-nbctl lr-lb-add R1 lb4-no-aff
ovn-nbctl lr-lb-add R1 lb40-no-aff
# Flush conntrack entries for easier output parsing of next test.
AT_CHECK([ovs-appctl dpctl/flush-conntrack])
OVS_WAIT_FOR_OUTPUT([
for i in $(seq 1 5); do
NS_EXEC([foo1], [nc -z fd11::a 8080])
done
dnl Each server should have at least one connection.
ovs-appctl dpctl/dump-conntrack | FORMAT_CT(fd11::2) | grep -v fe80 | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=fd11::2,dst=fd11::2,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd11::b,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
tcp,orig=(src=fd11::2,dst=fd11::a,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd11::2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
tcp,orig=(src=fd11::b,dst=fd11::2,sport=<cleared>,dport=<cleared>),reply=(src=fd11::2,dst=fd11::b,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
])
NS_CHECK_EXEC([foo1], [nc -z fd11::a 8081])
OVS_APP_EXIT_AND_WAIT([ovn-controller])
as ovn-sb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
OVS_APP_EXIT_AND_WAIT([ovn-northd])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
/connection dropped.*/d
/inactivity probe*/d"])
AT_CLEANUP
])
OVN_FOR_EACH_NORTHD([
AT_SETUP([LB correctly handles fragmented traffic])
AT_KEYWORDS([ovnlb])
CHECK_CONNTRACK()
CHECK_CONNTRACK_NAT()
ovn_start
OVS_TRAFFIC_VSWITCHD_START()
ADD_BR([br-int])
ADD_BR([br-ext])
# Logical network:
# 2 logical switches "public" (192.168.1.0/24) and "internal" (172.16.1.0/24)
# connected to a router lr.
# internal has a server.
# client is connected through localnet.
check ovs-ofctl add-flow br-ext action=normal
# Set external-ids in br-int needed for ovn-controller
check ovs-vsctl \
-- set Open_vSwitch . external-ids:system-id=hv1 \
-- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
-- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
-- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
-- set bridge br-int fail-mode=secure other-config:disable-in-band=true \
-- set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext
# Start ovn-controller
start_daemon ovn-controller
check ovn-nbctl lr-add lr
check ovn-nbctl ls-add internal
check ovn-nbctl ls-add public
check ovn-nbctl lrp-add lr lr-pub 00:00:01:01:02:03 192.168.1.1/24
check ovn-nbctl lsp-add public pub-lr -- set Logical_Switch_Port pub-lr \
type=router options:router-port=lr-pub addresses=\"00:00:01:01:02:03\"
check ovn-nbctl lrp-add lr lr-internal 00:00:01:01:02:04 172.16.1.1/24
check ovn-nbctl lsp-add internal internal-lr -- set Logical_Switch_Port internal-lr \
type=router options:router-port=lr-internal addresses=\"00:00:01:01:02:04\"
ovn-nbctl lsp-add public ln_port \
-- lsp-set-addresses ln_port unknown \
-- lsp-set-type ln_port localnet \
-- lsp-set-options ln_port network_name=phynet
ADD_NAMESPACES(client)
ADD_VETH(client, client, br-ext, "192.168.1.2/24", "f0:00:00:01:02:03", \
"192.168.1.1")
NS_EXEC([client], [ip l set dev client mtu 900])
ADD_NAMESPACES(server)
ADD_VETH(server, server, br-int, "172.16.1.2/24", "f0:00:0f:01:02:03", \
"172.16.1.1")
check ovn-nbctl lsp-add internal server \
-- lsp-set-addresses server "f0:00:0f:01:02:03 172.16.1.2"
check ovn-nbctl set logical_router lr options:chassis=hv1
AT_DATA([client.py], [dnl
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
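# Send a 1000-byte UDP payload to the VIP; the client interface MTU is 900,
# so the datagram is fragmented on the way out.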
sock.sendto(b"x" * 1000, ("172.16.1.20", 4242))
])
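# test_fragmented_traffic: send the oversized UDP datagram from the client to
# the VIP and wait until the fragments show up in the server-side capture.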
test_fragmented_traffic() {
check ovn-nbctl --wait=hv sync
check ovs-appctl dpctl/flush-conntrack
NETNS_DAEMONIZE([server], [nc -l -u 172.16.1.2 4242 > /dev/null], [server.pid])
# Collect UDP packets on client side
NETNS_DAEMONIZE([client], [tcpdump -l -U -i client -vnne \
udp > client.pcap 2>client_err], [tcpdump0.pid])
OVS_WAIT_UNTIL([grep "listening" client_err])
# Collect UDP packets on server side
NETNS_DAEMONIZE([server], [tcpdump -l -U -i server -vnne \
'udp and ip[[6:2]] > 0 and not ip[[6]] = 64' > server.pcap 2>server_err], [tcpdump1.pid])
OVS_WAIT_UNTIL([grep "listening" server_err])
NS_CHECK_EXEC([client], [$PYTHON3 ./client.py])
OVS_WAIT_UNTIL([test "$(cat server.pcap | wc -l)" = "4"])
check kill $(cat tcpdump0.pid) $(cat tcpdump1.pid) $(cat server.pid)
}
AS_BOX([LB on router without port and protocol])
check ovn-nbctl lb-add lb1 172.16.1.20 172.16.1.2
check ovn-nbctl lr-lb-add lr lb1
test_fragmented_traffic
check ovn-nbctl lr-lb-del lr
check ovn-nbctl lb-del lb1
AS_BOX([LB on router with port and protocol])
check ovn-nbctl lb-add lb1 172.16.1.20:4242 172.16.1.2:4242 udp
check ovn-nbctl lr-lb-add lr lb1
test_fragmented_traffic
check ovn-nbctl lr-lb-del lr
check ovn-nbctl lb-del lb1
AS_BOX([LB on switch without port and protocol])
check ovn-nbctl lb-add lb1 172.16.1.20 172.16.1.2
check ovn-nbctl ls-lb-add public lb1
test_fragmented_traffic
check ovn-nbctl ls-lb-del public
check ovn-nbctl lb-del lb1
AS_BOX([LB on switch with port and protocol])
check ovn-nbctl lb-add lb1 172.16.1.20:4242 172.16.1.2:4242 udp
check ovn-nbctl ls-lb-add public lb1
test_fragmented_traffic
check ovn-nbctl ls-lb-del public
check ovn-nbctl lb-del lb1
OVS_APP_EXIT_AND_WAIT([ovn-controller])
as ovn-sb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
OVS_APP_EXIT_AND_WAIT([ovn-northd])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
/connection dropped.*/d"])
AT_CLEANUP
])
OVN_FOR_EACH_NORTHD([
AT_SETUP([SNAT in separate zone from DNAT])
AT_SKIP_IF([test $HAVE_NC = no])
CHECK_CONNTRACK()
CHECK_CONNTRACK_NAT()
ovn_start
OVS_TRAFFIC_VSWITCHD_START()
ADD_BR([br-int])
# Set external-ids in br-int needed for ovn-controller
ovs-vsctl \
-- set Open_vSwitch . external-ids:system-id=hv1 \
-- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
-- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
-- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
-- set bridge br-int fail-mode=secure other-config:disable-in-band=true
# The goal of this test is to ensure that when traffic is first DNATted
# (by way of a load balancer), and then SNATted, the SNAT happens in a
# separate conntrack zone from the DNAT.
start_daemon ovn-controller
check ovn-nbctl ls-add public
check ovn-nbctl lr-add r1
check ovn-nbctl lrp-add r1 r1_public 00:de:ad:ff:00:01 172.16.0.1/16
check ovn-nbctl lrp-add r1 r1_s1 00:de:ad:fe:00:01 173.0.1.1/24
check ovn-nbctl lrp-set-gateway-chassis r1_public hv1
check ovn-nbctl lb-add r1_lb 30.0.0.1 172.16.0.102
check ovn-nbctl lr-lb-add r1 r1_lb
check ovn-nbctl ls-add s1
check ovn-nbctl lsp-add s1 s1_r1
check ovn-nbctl lsp-set-type s1_r1 router
check ovn-nbctl lsp-set-addresses s1_r1 router
check ovn-nbctl lsp-set-options s1_r1 router-port=r1_s1
check ovn-nbctl lsp-add s1 vm1
check ovn-nbctl lsp-set-addresses vm1 "00:de:ad:01:00:01 173.0.1.2"
check ovn-nbctl lsp-add public public_r1
check ovn-nbctl lsp-set-type public_r1 router
check ovn-nbctl lsp-set-addresses public_r1 router
check ovn-nbctl lsp-set-options public_r1 router-port=r1_public nat-addresses=router
check ovn-nbctl lr-add r2
check ovn-nbctl lrp-add r2 r2_public 00:de:ad:ff:00:02 172.16.0.2/16
check ovn-nbctl lrp-add r2 r2_s2 00:de:ad:fe:00:02 173.0.2.1/24
check ovn-nbctl lr-nat-add r2 dnat_and_snat 172.16.0.102 173.0.2.2
check ovn-nbctl lrp-set-gateway-chassis r2_public hv1
check ovn-nbctl ls-add s2
check ovn-nbctl lsp-add s2 s2_r2
check ovn-nbctl lsp-set-type s2_r2 router
check ovn-nbctl lsp-set-addresses s2_r2 router
check ovn-nbctl lsp-set-options s2_r2 router-port=r2_s2
check ovn-nbctl lsp-add s2 vm2
check ovn-nbctl lsp-set-addresses vm2 "00:de:ad:01:00:02 173.0.2.2"
check ovn-nbctl lsp-add public public_r2
check ovn-nbctl lsp-set-type public_r2 router
check ovn-nbctl lsp-set-addresses public_r2 router
check ovn-nbctl lsp-set-options public_r2 router-port=r2_public nat-addresses=router
ADD_NAMESPACES(vm1)
ADD_VETH(vm1, vm1, br-int, "173.0.1.2/24", "00:de:ad:01:00:01", \
"173.0.1.1")
ADD_NAMESPACES(vm2)
ADD_VETH(vm2, vm2, br-int, "173.0.2.2/24", "00:de:ad:01:00:02", \
"173.0.2.1")
check ovn-nbctl lr-nat-add r1 dnat_and_snat 172.16.0.101 173.0.1.2 vm1 00:00:00:01:02:03
wait_for_ports_up
check ovn-nbctl --wait=hv sync
# Create service that listens for TCP and UDP
NETNS_DAEMONIZE([vm2], [nc -l -u 1234], [nc0.pid])
NETNS_DAEMONIZE([vm2], [nc -l -k 1235], [nc1.pid])
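# Each test_<proto> helper sends traffic from vm1 to the VIP 30.0.0.1 and then
# checks conntrack: the load-balancer DNAT entry is marked with mark=2, while
# the SNAT entries show up as separate entries, i.e. in a separate zone.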
test_icmp() {
# Make sure that a ping works as expected
NS_CHECK_EXEC([vm1], [ping -c 3 -i 0.3 -w 2 30.0.0.1 | FORMAT_PING], \
[0], [dnl
3 packets transmitted, 3 received, 0% packet loss, time 0ms
])
# Finally, make sure that conntrack shows two separate zones being used for
# DNAT and SNAT
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
icmp,orig=(src=173.0.1.2,dst=30.0.0.1,id=<cleared>,type=8,code=0),reply=(src=172.16.0.102,dst=173.0.1.2,id=<cleared>,type=0,code=0),zone=<cleared>,mark=2
])
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.102) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
icmp,orig=(src=172.16.0.101,dst=172.16.0.102,id=<cleared>,type=8,code=0),reply=(src=173.0.2.2,dst=172.16.0.101,id=<cleared>,type=0,code=0),zone=<cleared>
icmp,orig=(src=173.0.1.2,dst=172.16.0.102,id=<cleared>,type=8,code=0),reply=(src=172.16.0.102,dst=172.16.0.101,id=<cleared>,type=0,code=0),zone=<cleared>
])
}
test_udp() {
NS_CHECK_EXEC([vm1], [nc -u 30.0.0.1 1234 -p 1222 -z])
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
udp,orig=(src=173.0.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.0.102,dst=173.0.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2
])
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.102) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
udp,orig=(src=172.16.0.101,dst=172.16.0.102,sport=<cleared>,dport=<cleared>),reply=(src=173.0.2.2,dst=172.16.0.101,sport=<cleared>,dport=<cleared>),zone=<cleared>
udp,orig=(src=173.0.1.2,dst=172.16.0.102,sport=<cleared>,dport=<cleared>),reply=(src=172.16.0.102,dst=172.16.0.101,sport=<cleared>,dport=<cleared>),zone=<cleared>
])
}
test_tcp() {
NS_CHECK_EXEC([vm1], [nc 30.0.0.1 1235 -z])
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(30.0.0.1) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=173.0.1.2,dst=30.0.0.1,sport=<cleared>,dport=<cleared>),reply=(src=172.16.0.102,dst=173.0.1.2,sport=<cleared>,dport=<cleared>),zone=<cleared>,mark=2,protoinfo=(state=<cleared>)
])
AT_CHECK([ovs-appctl dpctl/dump-conntrack | FORMAT_CT(172.16.0.102) | \
sed -e 's/zone=[[0-9]]*/zone=<cleared>/'], [0], [dnl
tcp,orig=(src=172.16.0.101,dst=172.16.0.102,sport=<cleared>,dport=<cleared>),reply=(src=173.0.2.2,dst=172.16.0.101,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
tcp,orig=(src=173.0.1.2,dst=172.16.0.102,sport=<cleared>,dport=<cleared>),reply=(src=172.16.0.102,dst=172.16.0.101,sport=<cleared>,dport=<cleared>),zone=<cleared>,protoinfo=(state=<cleared>)
])
}
for type in icmp udp tcp; do
AS_BOX([Testing $type])
# First time, when the packet needs to pass through pinctrl buffering
check ovs-appctl dpctl/flush-conntrack
ovn-sbctl --all destroy mac_binding
wait_row_count mac_binding 0
test_$type
# Second time with MAC binding being already set
check ovs-appctl dpctl/flush-conntrack
wait_row_count mac_binding 1 ip="172.16.0.102"
test_$type
done
OVS_APP_EXIT_AND_WAIT([ovn-controller])
as ovn-sb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as ovn-nb
OVS_APP_EXIT_AND_WAIT([ovsdb-server])
as northd
OVS_APP_EXIT_AND_WAIT([ovn-northd])
as
OVS_TRAFFIC_VSWITCHD_STOP(["/failed to query port patch-.*/d
/connection dropped.*/d"])
AT_CLEANUP
])
OVN_FOR_EACH_NORTHD([
AT_SETUP([LR with SNAT fragmentation needed for external server])
AT_KEYWORDS([ovnlb])
CHECK_CONNTRACK()
CHECK_CONNTRACK_NAT()
ovn_start
OVS_TRAFFIC_VSWITCHD_START()
ADD_BR([br-int])
ADD_BR([br-ext])
dnl Logical network:
dnl 2 logical switches "public" (192.168.1.0/24) and "internal" (172.16.1.0/24)
dnl connected to a router lr.
dnl internal has a client.
dnl server is connected through localnet.
dnl
dnl Server IP 192.168.1.2 MTU 900
dnl Client IP 172.16.1.2 MTU 800
dnl
dnl SNAT for internal 172.16.1.2/24 with router ip 192.168.1.1.
check ovs-ofctl add-flow br-ext action=normal
# Set external-ids in br-int needed for ovn-controller
check ovs-vsctl \
-- set Open_vSwitch . external-ids:system-id=hv1 \
-- set Open_vSwitch . external-ids:ovn-remote=unix:$ovs_base/ovn-sb/ovn-sb.sock \
-- set Open_vSwitch . external-ids:ovn-encap-type=geneve \
-- set Open_vSwitch . external-ids:ovn-encap-ip=169.0.0.1 \
-- set bridge br-int fail-mode=secure other-config:disable-in-band=true \
-- set Open_vSwitch . external-ids:ovn-bridge-mappings=phynet:br-ext
dnl Start ovn-controller
start_daemon ovn-controller
check ovn-nbctl lr-add lr
check ovn-nbctl ls-add internal
check ovn-nbctl ls-add public
check ovn-nbctl lrp-add lr lr-pub 00:00:01:01:02:03 192.168.1.1/24
check ovn-nbctl lsp-add public pub-lr -- set Logical_Switch_Port pub-lr \
type=router options:router-port=lr-pub addresses=\"00:00:01:01:02:03\"
check ovn-nbctl lrp-add lr lr-internal 00:00:01:01:02:04 172.16.1.1/24
check ovn-nbctl lsp-add internal internal-lr -- set Logical_Switch_Port internal-lr \
type=router options:router-port=lr-internal addresses=\"00:00:01:01:02:04\"
ovn-nbctl lsp-add public ln_port \
-- lsp-set-addresses ln_port unknown \
-- lsp-set-type ln_port localnet \
-- lsp-set-options ln_port network_name=phynet
ADD_NAMESPACES(server)
ADD_VETH([server], [server], [br-ext], ["192.168.1.2/24"],
["f0:00:00:01:02:03"], ["192.168.1.1"])
NS_EXEC([server], [ip l set dev server mtu 900])
NS_EXEC([server], [ip l show dev server])
ADD_NAMESPACES(client)
ADD_VETH([client], [client], [br-int], ["172.16.1.2/24"],
["f0:00:0f:01:02:03"], ["172.16.1.1"])
NS_EXEC([client], [ip l set dev client mtu 800])
NS_EXEC([client], [ip l show dev client])
check ovn-nbctl lsp-add internal client \
-- lsp-set-addresses client "f0:00:0f:01:02:03 172.16.1.2"
dnl Configure an OVN load balancer with a VIP. (Not strictly necessary, but if
dnl we do not have a load balancer and comment out the SNAT rule, a stray
dnl fragment is received on the client side.)
dnl check ovn-nbctl lb-add lb1 192.168.1.20:4242 172.16.1.2:4242 udp
dnl check ovn-nbctl lr-lb-add lr lb1
check ovn-nbctl set logical_router lr options:chassis=hv1
check ovn-nbctl set logical_router_port lr-internal options:gateway_mtu=800
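# Setting gateway_mtu on lr-internal makes the router generate ICMP
# "fragmentation needed" messages for packets larger than 800 bytes that are
# routed towards the client.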
check ovn-nbctl lr-nat-add lr snat 192.168.1.1 172.16.1.2/24
check ovn-nbctl --wait=hv sync
ovn-nbctl show
ovs-vsctl show
ovn-appctl -t ovn-controller vlog/set vconn:file:dbg pinctrl:file:dbg
AT_DATA([server.py], [dnl
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = '192.168.1.2'
server_port = 4242
server = (server_address, server_port)
sock.bind(server)
print("Listening on ", server_address, ":", str(server_port), flush=True)
while True: