--- a/bin/varnishd/VSC_main.vsc
+++ b/bin/varnishd/VSC_main.vsc
@@ -293,6 +293,14 @@
 	Number of times an HTTP/2 stream was refused because the queue was
 	too long already. See also parameter thread_queue_limit.
 
+.. varnish_vsc:: req_reset
+	:oneliner:	Requests reset
+
+	Number of times a client left before the VCL processing of its
+	requests completed. For HTTP/2 sessions, either the stream was
+	reset by an RST_STREAM frame from the client, or a stream or
+	connection error occurred.
+
 .. varnish_vsc:: n_object
 	:type:	gauge
 	:oneliner:	object structs made
--- a/bin/varnishd/cache/cache_transport.h
+++ b/bin/varnishd/cache/cache_transport.h
@@ -42,6 +42,7 @@ typedef void vtr_sess_panic_f (struct vs
 typedef void vtr_req_panic_f (struct vsb *, const struct req *);
 typedef void vtr_req_fail_f (struct req *, enum sess_close);
 typedef void vtr_reembark_f (struct worker *, struct req *);
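+/* Returns < 0 once the client is known to be gone, >= 0 otherwise. */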
+typedef int vtr_poll_f (struct req *);
 typedef int vtr_minimal_response_f (struct req *, uint16_t status);
 
 struct transport {
@@ -62,6 +63,7 @@ struct transport {
 	vtr_sess_panic_f		*sess_panic;
 	vtr_req_panic_f			*req_panic;
 	vtr_reembark_f			*reembark;
+	vtr_poll_f			*poll;
 	vtr_minimal_response_f		*minimal_response;
 
 	VTAILQ_ENTRY(transport)		list;
--- a/bin/varnishd/cache/cache_vcl_vrt.c
+++ b/bin/varnishd/cache/cache_vcl_vrt.c
@@ -41,6 +41,7 @@
 #include "vtim.h"
 
 #include "cache_director.h"
+#include "cache_transport.h"
 #include "cache_vcl.h"
 #include "cache_filter.h"
 
@@ -401,6 +402,35 @@ VRT_rel_vcl(VRT_CTX, struct vclref **ref
  * The workspace argument is where random VCL stuff gets space from.
  */
 
+static int
+req_poll(struct worker *wrk, struct req *req)
+{
+
+	CHECK_OBJ_NOTNULL(req->top, REQ_MAGIC);
+	CHECK_OBJ_NOTNULL(req->top->transport, TRANSPORT_MAGIC);
+
+	/* NB: Since a fail transition leads to vcl_synth, the request may be
+	 * short-circuited twice.
+	 */
+	if (req->req_reset) {
+		wrk->handling = VCL_RET_FAIL;
+		return (-1);
+	}
+
+	if (!FEATURE(FEATURE_VCL_REQ_RESET))
+		return (0);
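+	/* Transports without a poll callback cannot detect a gone client. */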
+	if (req->top->transport->poll == NULL)
+		return (0);
+	if (req->top->transport->poll(req->top) >= 0)
+		return (0);
+
+	VSLb_ts_req(req, "Reset", W_TIM_real(wrk));
+	wrk->stats->req_reset++;
+	wrk->handling = VCL_RET_FAIL;
+	req->req_reset = 1;
+	return (-1);
+}
+
 static void
 vcl_call_method(struct worker *wrk, struct req *req, struct busyobj *bo,
     void *specific, unsigned method, vcl_func_f *func)
@@ -414,6 +444,8 @@ vcl_call_method(struct worker *wrk, stru
 		CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
 		CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC);
 		CHECK_OBJ_NOTNULL(req->vcl, VCL_MAGIC);
+		if (req_poll(wrk, req))
+			return;
 		VCL_Req2Ctx(&ctx, req);
 	}
 	if (bo != NULL) {
--- a/bin/varnishd/http2/cache_http2_session.c
+++ b/bin/varnishd/http2/cache_http2_session.c
@@ -437,6 +437,16 @@ h2_new_session(struct worker *wrk, void
 	wrk->vsl = NULL;
 }
 
+static int v_matchproto_(vtr_poll_f)
+h2_poll(struct req *req)
+{
+	struct h2_req *r2;
+
+	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
+	CAST_OBJ_NOTNULL(r2, req->transport_priv, H2_REQ_MAGIC);
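+	/* A pending stream or connection error means the client is gone. */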
+	return (r2->error ? -1 : 1);
+}
+
 struct transport H2_transport = {
 	.name =			"H2",
 	.magic =		TRANSPORT_MAGIC,
@@ -446,4 +456,5 @@ struct transport H2_transport = {
 	.req_body =		h2_req_body,
 	.req_fail =		h2_req_fail,
 	.sess_panic =		h2_sess_panic,
+	.poll =			h2_poll,
 };
--- a/bin/varnishd/mgt/mgt_param_bits.c
+++ b/bin/varnishd/mgt/mgt_param_bits.c
@@ -211,8 +211,13 @@ tweak_feature(struct vsb *vsb, const str
 	unsigned j;
 	(void)par;
 
-	if (arg != NULL) {
-		if (!strcmp(arg, "none")) {
+	if (arg != NULL && arg != JSON_FMT) {
+		if (!strcmp(arg, "default")) {
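+			/* Only set the default-enabled bits, currently
+			 * vcl_req_reset; other feature bits are untouched.
+			 */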
+			AZ(bit_tweak(vsb, mgt_param.feature_bits,
+				FEATURE_Reserved,
+				"+vcl_req_reset",
+				feature_tags, "feature bit", "+"));
+		} else if (!strcmp(arg, "none")) {
 			memset(mgt_param.feature_bits,
 			    0, sizeof mgt_param.feature_bits);
 		} else {
@@ -260,6 +265,6 @@ struct parspec VSL_parspec[] = {
 #define FEATURE_BIT(U, l, d, ld) "\n\t" #l "\t" d
 #include "tbl/feature_bits.h"
 #undef FEATURE_BIT
-		, 0, "none", "" },
+		, 0, "default", "" },
 	{ NULL, NULL, NULL }
 };
--- a/bin/varnishd/storage/stevedore.c
+++ b/bin/varnishd/storage/stevedore.c
@@ -120,6 +120,7 @@ STV_AllocBuf(struct worker *wrk, const s
 	if (stv->allocbuf == NULL)
 		return (NULL);
 
+	wrk->strangelove = cache_param->nuke_limit;
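+	/* The budget above caps LRU nuking during this allocation. */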
 	buf = stv->allocbuf(wrk, stv, size + PRNDUP(sizeof *stvbuf), &priv);
 	if (buf == NULL)
 		return (NULL);
--- a/bin/varnishtest/tests/t02014.vtc
+++ b/bin/varnishtest/tests/t02014.vtc
@@ -1,6 +1,12 @@
 varnishtest "Exercise h/2 sender flow control code"
 
-barrier b1 sock 3 -cyclic
+barrier b1 sock 3
+barrier b2 sock 3
+barrier b3 sock 3
+barrier b4 sock 3
+
+barrier b2_err cond 2
+barrier b3_err cond 2
 
 server s1 {
 	rxreq
@@ -23,7 +29,9 @@ varnish v1 -vcl+backend {
 	}
 
 	sub vcl_deliver {
-		vtc.barrier_sync("${b1_sock}");
+		if (req.http.barrier) {
+			vtc.barrier_sync(req.http.barrier);
+		}
 	}
 } -start
 
@@ -43,7 +51,7 @@ client c1 {
 	} -start
 
 	stream 1 {
-		txreq
+		txreq -hdr barrier ${b1_sock}
 		barrier b1 sync
 		delay .5
 		txwinup -size 256
@@ -59,26 +67,44 @@ client c1 {
 	stream 0 -wait
 } -run
 
+varnish v1 -vsl_catchup
+
+logexpect l2 -v v1 -g raw {
+	expect * * ReqMethod GET
+	expect * = VCL_call DELIVER
+} -start
+
 client c2 {
 	stream 0 {
-		barrier b1 sync
+		barrier b2 sync
 	} -start
 
 	stream 1 {
-		txreq
+		txreq -hdr barrier ${b2_sock}
+		barrier b2_err sync
 		txdata -data "fail"
 		rxrst
 		expect rst.err == STREAM_CLOSED
-		barrier b1 sync
+		barrier b2 sync
 	} -run
 
 	stream 0 -wait
-} -run
+} -start
+
+logexpect l2 -wait
+barrier b2_err sync
+
+client c2 -wait
+
+logexpect l3 -v v1 -g raw {
+	expect * * ReqMethod POST
+	expect * = VCL_call DELIVER
+} -start
 
 client c3 {
 	stream 0 {
-		barrier b1 sync
-		barrier b1 sync
+		barrier b3 sync
+		barrier b4 sync
 		delay .5
 		txwinup -size 256
 		delay .5
@@ -89,17 +115,18 @@ client c3 {
 	} -start
 
 	stream 1 {
-		txreq -req "POST" -nostrend
+		txreq -req "POST" -hdr barrier ${b3_sock} -nostrend
 		txdata -data "ok"
+		barrier b3_err sync
 		txdata -data "fail"
 		rxrst
 		expect rst.err == STREAM_CLOSED
-		barrier b1 sync
+		barrier b3 sync
 	} -run
 
 	stream 3 {
-		txreq
-		barrier b1 sync
+		txreq -hdr barrier ${b4_sock}
+		barrier b4 sync
 		delay .5
 		txwinup -size 256
 		delay .5
@@ -112,4 +139,9 @@ client c3 {
 	} -run
 
 	stream 0 -wait
-} -run
+} -start
+
+logexpect l3 -wait
+barrier b3_err sync
+
+client c3 -wait
--- /dev/null
+++ b/bin/varnishtest/tests/t02025.vtc
@@ -0,0 +1,52 @@
+varnishtest "h2 reset interrupt"
+
+barrier b1 sock 2
+barrier b2 sock 2
+
+varnish v1 -cliok "param.set feature +http2"
+varnish v1 -cliok "param.set debug +syncvsl"
+varnish v1 -vcl {
+	import vtc;
+
+	backend be none;
+
+	sub vcl_recv {
+		vtc.barrier_sync("${b1_sock}");
+		vtc.barrier_sync("${b2_sock}");
+	}
+
+	sub vcl_miss {
+		vtc.panic("unreachable");
+	}
+} -start
+
+logexpect l1 -v v1 -g raw -i Debug {
+	expect * * Debug "^H2RXF RST_STREAM"
+} -start
+
+client c1 {
+	stream 1 {
+		txreq
+		barrier b1 sync
+		txrst
+	} -run
+	expect_close
+} -start
+
+logexpect l1 -wait
+barrier b2 sync
+
+client c1 -wait
+
+varnish v1 -vsl_catchup
+varnish v1 -expect req_reset == 1
+
+# NB: The varnishncsa command below shows a minimal pattern to collect
+# "rapid reset" suspects per session, along with the client IP address.
+# Here "rapid" means that the reset happened less than one second after
+# the request started. Session VXIDs that show up numerous times become
+# increasingly suspicious. The format can of course be extended with
+# anything else useful for data mining.
+shell -expect "1000 ${localhost}" {
+	varnishncsa -n ${v1_name} -d \
+		-q 'Timestamp:Reset[2] < 1.0' -F '%{VSL:Begin[2]}x %h'
+}
--- /dev/null
+++ b/bin/varnishtest/tests/t02026.vtc
@@ -0,0 +1,48 @@
+varnishtest "Dublicate pseudo-headers"
+
+server s1 {
+	rxreq
+	txresp
+} -start
+
+varnish v1 -arg "-p feature=+http2" -vcl+backend {
+} -start
+
+#client c1 {
+#	txreq -url "/some/path" -url "/some/other/path"
+#	rxresp
+#	expect resp.status == 400
+#} -run
+
+#client c1 {
+#	txreq -req "GET" -req "POST"
+#	rxresp
+#	expect resp.status == 400
+#} -run
+
+#client c1 {
+#	txreq -proto "HTTP/1.1" -proto "HTTP/2.0"
+#	rxresp
+#	expect resp.status == 400
+#} -run
+
+client c1 {
+	stream 1 {
+		txreq -url "/some/path" -url "/some/other/path"
+		rxrst
+	} -run
+} -run
+
+client c1 {
+	stream 1 {
+		txreq -scheme "http" -scheme "https"
+		rxrst
+	} -run
+} -run
+
+client c1 {
+	stream 1 {
+		txreq -req "GET" -req "POST"
+		rxrst
+	} -run
+} -run
--- a/doc/sphinx/reference/vsl.rst
+++ b/doc/sphinx/reference/vsl.rst
@@ -71,6 +71,11 @@ Resp
 Restart
 	Client request is being restarted.
 
+Reset
+	The client closed its connection, reset its stream or caused
+	a stream error that forced Varnish to reset the stream. Request
+	processing is interrupted and considered failed.
+
 Pipe handling timestamps
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
--- a/include/tbl/feature_bits.h
+++ b/include/tbl/feature_bits.h
@@ -83,6 +83,12 @@ FEATURE_BIT(HTTP_DATE_POSTEL,	http_date_
     "like Date:, Last-Modified:, Expires: etc."
 )
 
+FEATURE_BIT(VCL_REQ_RESET,			vcl_req_reset,
+    "Stop processing client VCL once the client is gone.",
+    "Stop processing client VCL once the client is gone. "
+    "When this happens, MAIN.req_reset is incremented."
+)
+
 #undef FEATURE_BIT
 
 /*lint -restore */
--- a/include/tbl/req_flags.h
+++ b/include/tbl/req_flags.h
@@ -37,6 +37,7 @@ REQ_FLAG(is_hit,		0, 0, "")
 REQ_FLAG(waitinglist,		0, 0, "")
 REQ_FLAG(want100cont,		0, 0, "")
 REQ_FLAG(late100cont,		0, 0, "")
+REQ_FLAG(req_reset,		0, 0, "")
 #undef REQ_FLAG
 
 /*lint -restore */
