Handle implicit ACKs in DTLS 1.3
Receiving part of the next flight should implicitly ACK the previous
flight in most cases. The final flight and post-handshake messages are
tricky, but we'll handle and test those in later CLs.
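
In outline, the new receive path behaves as follows (a simplified sketch
using only the functions this CL touches; the real check, with the full
fragment-processing loop, is in dtls1_process_handshake_fragments below):

    // Sketch only. ssl->s3->version != 0 means the version has been
    // negotiated, so we know whether the implicit-ACK rule of RFC 9147,
    // Section 7.1 applies at all.
    if (SSL_in_init(ssl) && ssl->s3->version != 0 &&
        ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
      // The peer has started its next flight, so our previous flight
      // must have arrived. Stop retransmitting it and drop it from the
      // outgoing queue.
      dtls1_stop_timer(ssl);
      dtls_clear_outgoing_messages(ssl);
    }
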
Bug: 42290594
Change-Id: Ia47551a103c7ca224c305892f36e4a270c14a396
Reviewed-on: https://boringssl-review.googlesource.com/c/boringssl/+/72827
Commit-Queue: David Benjamin <davidben@google.com>
Reviewed-by: Nick Harper <nharper@chromium.org>
diff --git a/ssl/d1_both.cc b/ssl/d1_both.cc
index 16b32c1..841edfe 100644
--- a/ssl/d1_both.cc
+++ b/ssl/d1_both.cc
@@ -351,6 +351,7 @@
bool dtls1_process_handshake_fragments(SSL *ssl, uint8_t *out_alert,
Span<const uint8_t> record) {
+ bool implicit_ack = false;
CBS cbs = record;
while (CBS_len(&cbs) > 0) {
// Read a handshake fragment.
@@ -392,6 +393,27 @@
return false;
}
+ if (SSL_in_init(ssl) && ssl->s3->version != 0 &&
+ ssl_protocol_version(ssl) >= TLS1_3_VERSION) {
+ // During the handshake, if we receive any portion of the next flight, the
+ // peer must have received our most recent flight. In DTLS 1.3, this is an
+ // implicit ACK. See RFC 9147, Section 7.1.
+ //
+ // This only applies during the handshake. After the handshake, the next
+ // message may be part of a post-handshake transaction. It also does not
+ // apply immediately after the handshake. As a client, receiving a
+ // KeyUpdate or NewSessionTicket does not imply the server has received
+ // our Finished. The server may have sent those messages in half-RTT.
+ //
+ // TODO(crbug.com/42290594): If we're offering 0-RTT, the final version
+ // is not yet known to be DTLS 1.3 and we should not do this. Track early
+ // and final versions more accurately.
+ //
+ // TODO(crbug.com/42290594): Once post-handshake messages are working,
+ // write a test for the half-RTT KeyUpdate case.
+ implicit_ack = true;
+ }
+
if (msg_hdr.seq - ssl->d1->handshake_read_seq > SSL_MAX_HANDSHAKE_FLIGHT) {
// Ignore fragments too far in the future.
continue;
@@ -416,6 +438,11 @@
frag->reassembly.MarkRange(frag_off, frag_off + frag_len);
}
+ if (implicit_ack) {
+ dtls1_stop_timer(ssl);
+ dtls_clear_outgoing_messages(ssl);
+ }
+
return true;
}
diff --git a/ssl/test/runner/runner.go b/ssl/test/runner/runner.go
index 362a763..7b08099 100644
--- a/ssl/test/runner/runner.go
+++ b/ssl/test/runner/runner.go
@@ -11548,12 +11548,9 @@
flags: flags,
})
- // In DTLS 1.2, receiving a part of the next flight should not stop
- // the retransmission timer.
- //
- // TODO(crbug.com/42290594): In DTLS 1.3, it should stop the timer
- // because we can use ACKs to request a retransmit. Test this.
if vers.version <= VersionTLS12 {
+ // In DTLS 1.2, receiving a part of the next flight should not stop
+ // the retransmission timer.
testCases = append(testCases, testCase{
protocol: dtls,
name: "DTLS-Retransmit-PartialProgress" + suffix,
@@ -11582,6 +11579,98 @@
},
flags: flags,
})
+ } else {
+ // In DTLS 1.3, receiving a part of the next flight implicitly ACKs
+ // the previous flight.
+ testCases = append(testCases, testCase{
+ testType: serverTest,
+ protocol: dtls,
+ name: "DTLS-Retransmit-PartialProgress-Server" + suffix,
+ config: Config{
+ MaxVersion: vers.version,
+ DefaultCurves: []CurveID{}, // Force HelloRetryRequest.
+ Bugs: ProtocolBugs{
+ WriteFlightDTLS: func(c *DTLSController, prev, received, next []DTLSMessage, records []DTLSRecordNumberInfo) {
+ // Send a portion of the first message. The rest was lost.
+ msg := next[0]
+ split := len(msg.Data) / 2
+ c.WriteFragments([]DTLSFragment{msg.Fragment(0, split)})
+ // This is enough to ACK the previous flight. The shim
+ // should stop retransmitting and even stop the timer.
+ //
+ // TODO(crbug.com/42290594): The shim should send a partial
+ // ACK to request that we retransmit.
+ for _, t := range useTimeouts {
+ c.AdvanceClock(t)
+ }
+ // "Retransmit" the rest of the flight. The shim should remember
+ // the portion that was already sent.
+ rest := []DTLSFragment{msg.Fragment(split, len(msg.Data)-split)}
+ for _, m := range next[1:] {
+ rest = append(rest, m.Fragment(0, len(m.Data)))
+ }
+ c.WriteFragments(rest)
+ },
+ },
+ },
+ flags: flags,
+ })
+
+ // When the shim is a client, receiving fragments before the version is
+ // known does not trigger this behavior.
+ testCases = append(testCases, testCase{
+ protocol: dtls,
+ name: "DTLS-Retransmit-PartialProgress-Client" + suffix,
+ config: Config{
+ MaxVersion: vers.version,
+ Bugs: ProtocolBugs{
+ WriteFlightDTLS: func(c *DTLSController, prev, received, next []DTLSMessage, records []DTLSRecordNumberInfo) {
+ // Send a portion of the ServerHello. The rest was lost.
+ msg := next[0]
+ split := len(msg.Data) / 2
+ c.WriteFragments([]DTLSFragment{msg.Fragment(0, split)})
+
+ // The shim does not yet know this is DTLS 1.3, so it still
+ // retransmits ClientHello.
+ c.AdvanceClock(useTimeouts[0])
+ c.ReadRetransmit()
+
+ // Finish the ServerHello. The version is still not known
+ // at the time the ServerHello fragment is processed. This is
+ // not as efficient as it could be; we could go back and
+ // implicitly ACK once the version is known. But the last
+ // byte of ServerHello will almost certainly be in the same
+ // packet as EncryptedExtensions, which will trigger the case
+ // below.
+ c.WriteFragments([]DTLSFragment{msg.Fragment(split, len(msg.Data)-split)})
+ c.AdvanceClock(useTimeouts[1])
+ c.ReadRetransmit()
+
+ // Send EncryptedExtensions. The shim now knows the version
+ // and implicitly ACKs everything.
+ c.WriteFragments([]DTLSFragment{next[1].Fragment(0, len(next[1].Data))})
+
+ // This is enough to ACK the previous flight. The shim
+ // should stop retransmitting and even stop the timer.
+ //
+ // TODO(crbug.com/42290594): The shim should send a partial
+ // ACK to request that we retransmit.
+ for _, t := range useTimeouts[2:] {
+ c.AdvanceClock(t)
+ }
+
+ // "Retransmit" the rest of the flight. The shim should remember
+ // the portion that was already sent.
+ var rest []DTLSFragment
+ for _, m := range next[2:] {
+ rest = append(rest, m.Fragment(0, len(m.Data)))
+ }
+ c.WriteFragments(rest)
+ },
+ },
+ },
+ flags: flags,
+ })
}
// Test that exceeding the timeout schedule hits a read