@@ -786,6 +786,201 @@ chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
 	       DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
 }
 
+/*
+ * chcr_ktls_check_tcp_options: check if the TCP header carries any option
+ * other than NOP/EOL padding.
+ * @tcp - TCP header of the skb being transmitted.
+ * return: 1 / 0
+ */
+static int
+chcr_ktls_check_tcp_options(struct tcphdr *tcp)
+{
+        int cnt, opt, optlen;
+        u_char *cp;
+
+        cp = (u_char *)(tcp + 1);
+        cnt = (tcp->doff << 2) - sizeof(struct tcphdr);
+        for (; cnt > 0; cnt -= optlen, cp += optlen) {
+                opt = cp[0];
+                if (opt == TCPOPT_EOL)
+                        break;
+                if (opt == TCPOPT_NOP) {
+                        optlen = 1;
+                } else {
+                        if (cnt < 2)
+                                break;
+                        optlen = cp[1];
+                        if (optlen < 2 || optlen > cnt)
+                                break;
+                }
+                switch (opt) {
+                case TCPOPT_NOP:
+                        break;
+                default:
+                        return 1;
+                }
+        }
+        return 0;
+}
+
+/*
+ * chcr_ktls_write_tcp_options: TP can't send out all the options, so we need
+ * to send them out separately.
+ * @tx_info - driver specific tls info.
+ * @skb - skb containing the partial record.
+ * @q - TX queue.
+ * @tx_chan - channel number.
+ * return: NETDEV_TX_OK/NETDEV_TX_BUSY.
+ */
+static int
+chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
+                            struct sge_eth_txq *q, uint32_t tx_chan)
+{
+        struct fw_eth_tx_pkt_wr *wr;
+        struct cpl_tx_pkt_core *cpl;
+        u32 ctrl, iplen, maclen;
+        struct ipv6hdr *ip6;
+        unsigned int ndesc;
+        struct tcphdr *tcp;
+        int len16, pktlen;
+        struct iphdr *ip;
+        int credits;
+        u8 buf[150];
+        void *pos;
+
+        iplen = skb_network_header_len(skb);
+        maclen = skb_mac_header_len(skb);
+
+        /* packet length = eth hdr len + ip hdr len + tcp hdr len
+         * (including options).
+         */
+        pktlen = skb->len - skb->data_len;
+
+        ctrl = sizeof(*cpl) + pktlen;
+        len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
+        /* check how many descriptors are needed */
+        ndesc = DIV_ROUND_UP(len16, 4);
+
+        credits = chcr_txq_avail(&q->q) - ndesc;
+        if (unlikely(credits < 0)) {
+                chcr_eth_txq_stop(q);
+                return NETDEV_TX_BUSY;
+        }
+
+        pos = &q->q.desc[q->q.pidx];
+        wr = pos;
+
+        /* Firmware work request header */
+        wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+                               FW_WR_IMMDLEN_V(ctrl));
+
+        wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+        wr->r3 = 0;
+
+        cpl = (void *)(wr + 1);
+
+        /* CPL header */
+        cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) | TXPKT_INTF_V(tx_chan) |
+                           TXPKT_PF_V(tx_info->adap->pf));
+        cpl->pack = 0;
+        cpl->len = htons(pktlen);
+        /* checksum offload */
+        cpl->ctrl1 = 0;
+
+        pos = cpl + 1;
+
+        memcpy(buf, skb->data, pktlen);
+        if (tx_info->ip_family == AF_INET) {
+                /* we need to correct the ip header len */
+                ip = (struct iphdr *)(buf + maclen);
+                ip->tot_len = htons(pktlen - maclen);
+        } else {
+                ip6 = (struct ipv6hdr *)(buf + maclen);
+                ip6->payload_len = htons(pktlen - maclen);
+        }
+        /* now take care of the tcp header, if fin is not set then clear push
+         * bit as well, and if fin is set, it will be sent at the last so we
+         * need to update the tcp sequence number as per the last packet.
+         */
+        tcp = (struct tcphdr *)(buf + maclen + iplen);
+
+        if (!tcp->fin)
+                tcp->psh = 0;
+        else
+                tcp->seq = htonl(tx_info->prev_seq);
+
+        chcr_copy_to_txd(buf, &q->q, pos, pktlen);
+
+        chcr_txq_advance(&q->q, ndesc);
+        cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+        return 0;
+}
+
+/* chcr_ktls_skb_shift - Shifts the requested length of paged data from one
+ * skb to another.
+ * @tgt - buffer into which tail data gets added
+ * @skb - buffer from which the paged data comes
+ * @shiftlen - shift up to this many bytes
+ */
+static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
+                               int shiftlen)
+{
+        skb_frag_t *fragfrom, *fragto;
+        int from, to, todo;
+
+        WARN_ON(shiftlen > skb->data_len);
+
+        todo = shiftlen;
+        from = 0;
+        to = 0;
+        fragfrom = &skb_shinfo(skb)->frags[from];
+
+        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
+                fragfrom = &skb_shinfo(skb)->frags[from];
+                fragto = &skb_shinfo(tgt)->frags[to];
+
+                if (todo >= skb_frag_size(fragfrom)) {
+                        *fragto = *fragfrom;
+                        todo -= skb_frag_size(fragfrom);
+                        from++;
+                        to++;
+
+                } else {
+                        __skb_frag_ref(fragfrom);
+                        skb_frag_page_copy(fragto, fragfrom);
+                        skb_frag_off_copy(fragto, fragfrom);
+                        skb_frag_size_set(fragto, todo);
+
+                        skb_frag_off_add(fragfrom, todo);
+                        skb_frag_size_sub(fragfrom, todo);
+                        todo = 0;
+
+                        to++;
+                        break;
+                }
+        }
+
+        /* Ready to "commit" this state change to tgt */
+        skb_shinfo(tgt)->nr_frags = to;
+
+        /* Reposition in the original skb */
+        to = 0;
+        while (from < skb_shinfo(skb)->nr_frags)
+                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
+
+        skb_shinfo(skb)->nr_frags = to;
+
+        WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
+
+        skb->len -= shiftlen;
+        skb->data_len -= shiftlen;
+        skb->truesize -= shiftlen;
+        tgt->len += shiftlen;
+        tgt->data_len += shiftlen;
+        tgt->truesize += shiftlen;
+
+        return shiftlen;
+}
+
 /*
  * chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
  * received has partial end part of the record, send out the complete record, so
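A quick way to see what the new check treats as a "real" option is to run the
same walk over a hand-built option area in user space. The sketch below mirrors
the loop in chcr_ktls_check_tcp_options(); the has_real_options() helper, its
buffers, and the MSS option used as the example are illustrative only and are
not part of the driver.

/* User-space sketch of the option walk: returns 1 as soon as any option
 * other than NOP padding (or the terminating EOL) is found.
 */
#include <stdio.h>

#define TCPOPT_EOL 0    /* standard TCP option kinds */
#define TCPOPT_NOP 1

static int has_real_options(const unsigned char *cp, int cnt)
{
        int opt, optlen;

        for (; cnt > 0; cnt -= optlen, cp += optlen) {
                opt = cp[0];
                if (opt == TCPOPT_EOL)
                        break;
                if (opt == TCPOPT_NOP) {
                        optlen = 1;
                        continue;
                }
                if (cnt < 2)
                        break;
                optlen = cp[1];
                if (optlen < 2 || optlen > cnt)
                        break;
                return 1;       /* anything other than NOP/EOL counts */
        }
        return 0;
}

int main(void)
{
        unsigned char padding_only[4] = { TCPOPT_NOP, TCPOPT_NOP, TCPOPT_EOL, 0 };
        /* kind 2 (MSS), length 4, value 1460, then EOL padding */
        unsigned char with_mss[8] = { TCPOPT_NOP, TCPOPT_NOP, 2, 4, 0x05, 0xb4,
                                      TCPOPT_EOL, 0 };

        printf("padding only: %d\n", has_real_options(padding_only, 4));
        printf("with MSS    : %d\n", has_real_options(with_mss, 8));
        return 0;
}

When the check fires (and FIN is clear), chcr_ktls_xmit() first pushes the bare
headers out through chcr_ktls_write_tcp_options() so the options reach the
peer, and only then hands the TLS payload to the offload path.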
@@ -949,6 +1144,76 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
 	return 0;
 }
 
+/*
+ * chcr_ktls_copy_record_in_skb
+ * @nskb - new skb to which the frags are to be added.
+ * @record - record which has the complete 16k record in frags.
+ */
+static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+                                         struct tls_record_info *record)
+{
+        int i = 0;
+
+        for (i = 0; i < record->num_frags; i++) {
+                skb_shinfo(nskb)->frags[i] = record->frags[i];
+                /* increase the frag ref count */
+                __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+        }
+
+        skb_shinfo(nskb)->nr_frags = record->num_frags;
+        nskb->data_len = record->len;
+        nskb->len += record->len;
+        nskb->truesize += record->len;
+}
+
+/*
+ * chcr_ktls_update_snd_una: Reset SND_UNA. It is done to avoid sending the
+ * same segment again: the hardware will discard the segment which is before
+ * the current tx max.
+ * @tx_info - driver specific tls info.
+ * @q - TX queue.
+ * return: 0 on success, NETDEV_TX_BUSY if the queue is out of credits.
+ */
+static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
+                                    struct sge_eth_txq *q)
+{
+        struct fw_ulptx_wr *wr;
+        unsigned int ndesc;
+        int credits;
+        void *pos;
+        u32 len;
+
+        len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
+        ndesc = DIV_ROUND_UP(len, 64);
+
+        credits = chcr_txq_avail(&q->q) - ndesc;
+        if (unlikely(credits < 0)) {
+                chcr_eth_txq_stop(q);
+                return NETDEV_TX_BUSY;
+        }
+
+        pos = &q->q.desc[q->q.pidx];
+
+        wr = pos;
+        /* ULPTX wr */
+        wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+        wr->cookie = 0;
+        /* fill len in wr field */
+        wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+
+        pos += sizeof(*wr);
+
+        pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
+                                         TCB_SND_UNA_RAW_W,
+                                         TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
+                                         TCB_SND_UNA_RAW_V(0), 0);
+
+        chcr_txq_advance(&q->q, ndesc);
+        cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+
+        return 0;
+}
+
 /*
  * chcr_end_part_handler: This handler will handle the record which
  * is complete or if record's end part is received. T6 adapter has a issue that
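Both new work-request builders (chcr_ktls_write_tcp_options() above and
chcr_ktls_update_snd_una() here) size themselves the same way: the immediate
length is rounded up to 16-byte units for the LEN16 field and then to 64-byte
descriptors, and that descriptor count is what gets compared against
chcr_txq_avail() before anything is written. A small sketch of the arithmetic;
the lengths below are example values, not the real firmware header sizes.

/* Sketch of the LEN16 / descriptor arithmetic used by the new WR builders.
 * wr_hdr_len and immediate_len are made-up numbers; the driver uses
 * sizeof(struct fw_eth_tx_pkt_wr) etc. for the real values.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int wr_hdr_len = 16;     /* assumed WR header size */
        unsigned int immediate_len = 74;  /* assumed CPL + packet bytes */
        unsigned int len16, ndesc;

        len16 = DIV_ROUND_UP(wr_hdr_len + immediate_len, 16);
        ndesc = DIV_ROUND_UP(len16, 4);   /* 4 x 16B units per 64B descriptor,
                                           * same as DIV_ROUND_UP(len, 64)
                                           */

        printf("len16 = %u, ndesc = %u\n", len16, ndesc);
        /* ndesc is checked against the free credits; if they run out the
         * queue is stopped and NETDEV_TX_BUSY is returned.
         */
        return 0;
}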
@@ -978,8 +1243,22 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 	if (tls_end_offset == record->len) {
 		nskb = skb;
 	} else {
-		/* handle it in next patch */
-		goto out;
+		dev_kfree_skb_any(skb);
+
+		nskb = alloc_skb(0, GFP_KERNEL);
+		if (!nskb)
+			return NETDEV_TX_BUSY;
+		/* copy complete record in skb */
+		chcr_ktls_copy_record_in_skb(nskb, record);
+		/* packet is being sent from the beginning, update the tcp_seq
+		 * accordingly.
+		 */
+		tcp_seq = tls_record_start_seq(record);
+		/* reset snd una, so the middle record won't send the already
+		 * sent part.
+		 */
+		if (chcr_ktls_update_snd_una(tx_info, q))
+			goto out;
 	}
 
 	if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
@@ -989,8 +1268,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 	}
 	return 0;
 out:
-	if (nskb)
-		dev_kfree_skb_any(nskb);
+	dev_kfree_skb_any(nskb);
 	return NETDEV_TX_BUSY;
 }
 
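The else branch added to chcr_end_part_handler() covers the case where the skb
only carries the tail of a record: the original skb is dropped, a fresh one is
built around the record's own frags, tcp_seq is rewound to the start of the
record, and SND_UNA_RAW is reset so that, per the driver's comments, the TCB
discards whatever lies before the current tx max instead of retransmitting it.
A toy calculation of the sequence numbers involved, with made-up values:

/* Illustration of the sequence rewind in the end-part path.  All numbers are
 * made up: a 16 KB record starting at sequence 1000, of which the host skb
 * only carries the last 2 KB.
 */
#include <stdio.h>

int main(void)
{
        unsigned int record_start = 1000;               /* tls_record_start_seq() */
        unsigned int record_len   = 16384;
        unsigned int skb_seq      = record_start + record_len - 2048;

        /* the rebuilt skb carries the whole record and is sent from its start */
        printf("resend from seq %u instead of %u\n", record_start, skb_seq);

        /* the SND_UNA_RAW reset is what keeps the hardware from putting the
         * first 14 KB on the wire a second time
         */
        printf("bytes the TCB should skip: %u\n", skb_seq - record_start);
        return 0;
}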
@@ -1049,6 +1327,13 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	qidx = skb->queue_mapping;
 	q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
 	cxgb4_reclaim_completed_tx(adap, &q->q, true);
+	/* if tcp options are set but fin is not set, send the options first */
+	if (!th->fin && chcr_ktls_check_tcp_options(th)) {
+		ret = chcr_ktls_write_tcp_options(tx_info, skb, q,
+						  tx_info->tx_chan);
+		if (ret)
+			return NETDEV_TX_BUSY;
+	}
 	/* update tcb */
 	ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
 				      ntohl(th->ack_seq),
@@ -1063,7 +1348,7 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* go through the skb and send only one record at a time. */
 	data_len = skb->data_len;
-	/* TCP segments can be in received from host either complete or partial.
+	/* TCP segments can be received either complete or partial.
 	 * chcr_end_part_handler will handle cases if complete record or end
 	 * part of the record is received. Incase of partial end part of record,
 	 * we will send the complete record again.
@@ -1108,8 +1393,14 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			struct sk_buff *nskb = NULL;
 
 			if (tls_end_offset < data_len) {
-				/* handle it later */
-				goto clear_ref;
+				nskb = alloc_skb(0, GFP_KERNEL);
+				if (unlikely(!nskb)) {
+					ret = -ENOMEM;
+					goto clear_ref;
+				}
+
+				chcr_ktls_skb_shift(nskb, local_skb,
+						    tls_end_offset);
 			} else {
 				/* its the only record in this skb, directly
 				 * point it.
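For a record that ends inside the skb, the new branch above allocates an empty
nskb and lets chcr_ktls_skb_shift() move exactly tls_end_offset bytes of paged
data into it: whole frags are handed over until the remaining budget is smaller
than the next frag, which is then split between the two skbs (the driver
additionally takes a page reference for the shared frag). The same bookkeeping
on plain structs, with arbitrary frag sizes picked only for illustration:

/* User-space analogue of the frag handover in chcr_ktls_skb_shift(): move
 * shiftlen bytes of paged data from src to dst, splitting the frag that
 * straddles the boundary.
 */
#include <stdio.h>

struct frag { unsigned int off, size; };

static int shift_frags(struct frag *dst, int *dst_n,
                       struct frag *src, int *src_n, int shiftlen)
{
        int from = 0, to = 0, todo = shiftlen;

        while (todo > 0 && from < *src_n) {
                if (todo >= (int)src[from].size) {
                        dst[to++] = src[from];          /* hand over whole frag */
                        todo -= src[from++].size;
                } else {
                        dst[to] = src[from];            /* split straddling frag;
                                                         * the driver also takes a
                                                         * page ref here since both
                                                         * skbs now map this page */
                        dst[to++].size = todo;
                        src[from].off += todo;
                        src[from].size -= todo;
                        todo = 0;
                }
        }
        *dst_n = to;

        /* compact the source: drop the frags that were fully handed over */
        for (to = 0; from < *src_n; )
                src[to++] = src[from++];
        *src_n = to;

        return shiftlen - todo;
}

int main(void)
{
        struct frag src[3] = { {0, 4096}, {0, 4096}, {0, 2048} };
        struct frag dst[3];
        int src_n = 3, dst_n = 0;

        shift_frags(dst, &dst_n, src, &src_n, 6000);
        printf("dst frags: %d (last size %u), src frags left: %d (first size %u)\n",
               dst_n, dst[dst_n - 1].size, src_n, src[0].size);
        return 0;
}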
@@ -1145,6 +1436,12 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	} while (data_len > 0);
 
 	tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
+	/* tcp fin is set, send a separate tcp msg including all the options
+	 * as well.
+	 */
+	if (th->fin)
+		chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+
 out:
 	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
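Finally, when FIN is set the options packet goes out after all the records, and
chcr_ktls_write_tcp_options() rewrites its sequence number to
tx_info->prev_seq, which the loop above has just advanced past the paged
payload of this skb. A tiny sketch of that bookkeeping with made-up numbers;
plain u32 arithmetic already handles sequence wraparound.

/* Sketch of the prev_seq bookkeeping used for the trailing FIN/options packet.
 * Values are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t th_seq   = 0xfffff000u;  /* seq of the incoming skb */
        uint32_t data_len = 0x2000;       /* paged TLS payload bytes in it */
        uint32_t prev_seq = th_seq + data_len;   /* wraps past 2^32 */

        printf("FIN/options packet goes out with seq %#x\n", prev_seq);
        return 0;
}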