Diffstat (limited to 'meta-networking/recipes-daemons')
-rw-r--r-- meta-networking/recipes-daemons/cyrus-sasl/cyrus-sasl_2.1.28.bb | 1
-rw-r--r-- meta-networking/recipes-daemons/postfix/files/0006-makedefs-Account-for-linux-6.x-version.patch | 35
-rw-r--r-- meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-1.patch | 377
-rw-r--r-- meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-2.patch | 978
-rw-r--r-- meta-networking/recipes-daemons/postfix/postfix_3.6.7.bb | 3
-rw-r--r-- meta-networking/recipes-daemons/proftpd/files/CVE-2023-51713.patch | 277
-rw-r--r-- meta-networking/recipes-daemons/proftpd/proftpd_1.3.7c.bb | 1
-rw-r--r-- meta-networking/recipes-daemons/radvd/radvd.inc | 3
-rw-r--r-- meta-networking/recipes-daemons/squid/files/CVE-2023-46728.patch | 608
-rw-r--r-- meta-networking/recipes-daemons/squid/files/CVE-2023-46846-pre1.patch | 1154
-rw-r--r-- meta-networking/recipes-daemons/squid/files/CVE-2023-46846.patch | 169
-rw-r--r-- meta-networking/recipes-daemons/squid/files/CVE-2023-46847.patch | 47
-rw-r--r-- meta-networking/recipes-daemons/squid/files/CVE-2023-49285.patch | 37
-rw-r--r-- meta-networking/recipes-daemons/squid/files/CVE-2023-49286.patch | 87
-rw-r--r-- meta-networking/recipes-daemons/squid/files/CVE-2023-50269.patch | 62
-rw-r--r-- meta-networking/recipes-daemons/squid/squid_4.15.bb | 7
16 files changed, 3845 insertions, 1 deletions
diff --git a/meta-networking/recipes-daemons/cyrus-sasl/cyrus-sasl_2.1.28.bb b/meta-networking/recipes-daemons/cyrus-sasl/cyrus-sasl_2.1.28.bb
index e344733ef4..3fc1b0fd17 100644
--- a/meta-networking/recipes-daemons/cyrus-sasl/cyrus-sasl_2.1.28.bb
+++ b/meta-networking/recipes-daemons/cyrus-sasl/cyrus-sasl_2.1.28.bb
@@ -73,6 +73,7 @@ do_install:append() {
}
USERADD_PACKAGES = "${PN}-bin"
+GROUPADD_PARAM:${PN}-bin = "--system mail"
USERADD_PARAM:${PN}-bin = "--system --home=/var/spool/mail -g mail cyrus"
SYSTEMD_PACKAGES = "${PN}-bin"
diff --git a/meta-networking/recipes-daemons/postfix/files/0006-makedefs-Account-for-linux-6.x-version.patch b/meta-networking/recipes-daemons/postfix/files/0006-makedefs-Account-for-linux-6.x-version.patch
new file mode 100644
index 0000000000..ad1704520c
--- /dev/null
+++ b/meta-networking/recipes-daemons/postfix/files/0006-makedefs-Account-for-linux-6.x-version.patch
@@ -0,0 +1,35 @@
+From e5ddcf9575437bacd64c2b68501b413014186a6a Mon Sep 17 00:00:00 2001
+From: Khem Raj <raj.khem@gmail.com>
+Date: Wed, 19 Oct 2022 10:15:01 -0700
+Subject: [PATCH] makedefs: Account for linux 6.x version
+
+Major version has bumped to 6 and script needs to know that
+
+Upstream-Status: Pending
+Signed-off-by: Khem Raj <raj.khem@gmail.com>
+---
+ makedefs | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/makedefs
++++ b/makedefs
+@@ -613,7 +613,7 @@ EOF
+ : ${SHLIB_ENV="LD_LIBRARY_PATH=`pwd`/lib"}
+ : ${PLUGIN_LD="${CC-gcc} -shared"}
+ ;;
+- Linux.[345].*) SYSTYPE=LINUX$RELEASE_MAJOR
++ Linux.[3-6]*) SYSTYPE=LINUX$RELEASE_MAJOR
+ case "$CCARGS" in
+ *-DNO_DB*) ;;
+ *-DHAS_DB*) ;;
+--- a/src/util/sys_defs.h
++++ b/src/util/sys_defs.h
+@@ -751,7 +751,7 @@ extern int initgroups(const char *, int)
+ /*
+ * LINUX.
+ */
+-#if defined(LINUX2) || defined(LINUX3) || defined(LINUX4) || defined(LINUX5)
++#if defined(LINUX2) || defined(LINUX3) || defined(LINUX4) || defined(LINUX5) || defined(LINUX6)
+ #define SUPPORTED
+ #define UINT32_TYPE unsigned int
+ #define UINT16_TYPE unsigned short
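
The makedefs hunk above widens the shell pattern from Linux.[345].* to Linux.[3-6]* so 6.x kernels still map to SYSTYPE=LINUX$RELEASE_MAJOR, and the sys_defs.h hunk adds LINUX6 to the supported-platform guard. As a rough standalone illustration of the version check that pattern encodes (not part of the patch; function and variable names here are made up), a small C program can reduce a kernel release string to its major number and test it against the accepted 3-6 range:

/*
 * Standalone illustration only: reduce a release string such as "6.1.55"
 * to its major number and check the range the widened pattern accepts.
 */
#include <stdio.h>
#include <stdlib.h>

static int release_major(const char *release)
{
    /* strtol() stops at the first '.', leaving just the major number. */
    return (int) strtol(release, NULL, 10);
}

int main(void)
{
    const char *samples[] = { "3.10.0", "5.15.0", "6.1.55", "2.6.32" };
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        int major = release_major(samples[i]);
        /* Mirrors the Linux.[3-6]* case label: majors 3 through 6 match. */
        printf("%s -> LINUX%d (%s)\n", samples[i], major,
               (major >= 3 && major <= 6) ? "supported" : "not matched");
    }
    return 0;
}
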
diff --git a/meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-1.patch b/meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-1.patch
new file mode 100644
index 0000000000..65436b704e
--- /dev/null
+++ b/meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-1.patch
@@ -0,0 +1,377 @@
+From a6596ec37a4892e1d9c2498ecbfc4b8e6be5156a Mon Sep 17 00:00:00 2001
+From: Wietse Venema <wietse@porcupine.org>
+Date: Fri, 22 Dec 2023 00:00:00 -0500
+Subject: [PATCH] postfix-3.6.13
+---
+Upstream-Status: Backport from [https://launchpad.net/ubuntu/+source/postfix/3.6.4-1ubuntu1.3]
+CVE: CVE-2023-51764
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ man/man5/postconf.5 | 55 +++++++++++++++++++++++++++++++++++++++++++++++
+ man/man8/smtpd.8 | 9 +++++++
+ mantools/postlink | 2 +
+ proto/postconf.proto | 52 ++++++++++++++++++++++++++++++++++++++++++++
+ src/global/mail_params.h | 11 ++++++++-
+ src/global/smtp_stream.c | 14 +++++++++++
+ src/global/smtp_stream.h | 2 +
+ src/smtpd/smtpd.c | 42 +++++++++++++++++++++++++++++++++++
+ 8 files changed, 185 insertions(+), 2 deletions(-)
+
+--- a/man/man5/postconf.5
++++ b/man/man5/postconf.5
+@@ -10412,6 +10412,61 @@
+ parameter $name expansion.
+ .PP
+ This feature is available in Postfix 2.0 and later.
++.SH smtpd_forbid_bare_newline (default: Postfix < 3.9: no)
++Reply with "Error: bare <LF> received" and disconnect
++when a remote SMTP client sends a line ending in <LF>, violating
++the RFC 5321 requirement that lines must end in <CR><LF>.
++This feature is disbled by default with Postfix < 3.9. Use
++smtpd_forbid_bare_newline_exclusions to exclude non\-standard clients
++such as netcat. Specify "smtpd_forbid_bare_newline = no" to disable
++(not recommended for an Internet\-connected MTA).
++.PP
++See
++https://www.postfix.org/smtp\-smuggling.html for details.
++.PP
++Example:
++.sp
++.in +4
++.nf
++.na
++.ft C
++# Disconnect remote SMTP clients that send bare newlines, but allow
++# local clients with non\-standard SMTP implementations such as netcat,
++# fax machines, or load balancer health checks.
++#
++smtpd_forbid_bare_newline = yes
++smtpd_forbid_bare_newline_exclusions = $mynetworks
++.fi
++.ad
++.ft R
++.in -4
++.PP
++This feature is available in Postfix >= 3.9, 3.8.4, 3.7.9,
++3.6.13, and 3.5.23.
++.SH smtpd_forbid_bare_newline_exclusions (default: $mynetworks)
++Exclude the specified clients from smtpd_forbid_bare_newline
++enforcement. It uses the same syntax and parent\-domain matching
++behavior as mynetworks.
++.PP
++Example:
++.sp
++.in +4
++.nf
++.na
++.ft C
++# Disconnect remote SMTP clients that send bare newlines, but allow
++# local clients with non\-standard SMTP implementations such as netcat,
++# fax machines, or load balancer health checks.
++#
++smtpd_forbid_bare_newline = yes
++smtpd_forbid_bare_newline_exclusions = $mynetworks
++.fi
++.ad
++.ft R
++.in -4
++.PP
++This feature is available in Postfix >= 3.9, 3.8.4, 3.7.9,
++3.6.13, and 3.5.23.
+ .SH smtpd_forbidden_commands (default: CONNECT, GET, POST)
+ List of commands that cause the Postfix SMTP server to immediately
+ terminate the session with a 221 code. This can be used to disconnect
+--- a/man/man8/smtpd.8
++++ b/man/man8/smtpd.8
+@@ -808,6 +808,15 @@
+ The maximal number of AUTH commands that any client is allowed to
+ send to this service per time unit, regardless of whether or not
+ Postfix actually accepts those commands.
++.PP
++Available in Postfix 3.9, 3.8.4, 3.7.9, 3.6.13, 3.5.23 and later:
++.IP "\fBsmtpd_forbid_bare_newline (Postfix < 3.9: no)\fR"
++Reply with "Error: bare <LF> received" and disconnect
++when a remote SMTP client sends a line ending in <LF>, violating
++the RFC 5321 requirement that lines must end in <CR><LF>.
++.IP "\fBsmtpd_forbid_bare_newline_exclusions ($mynetworks)\fR"
++Exclude the specified clients from smtpd_forbid_bare_newline
++enforcement.
+ .SH "TARPIT CONTROLS"
+ .na
+ .nf
+--- a/mantools/postlink
++++ b/mantools/postlink
+@@ -547,6 +547,8 @@
+ s;\bsmtpd_error_sleep_time\b;<a href="postconf.5.html#smtpd_error_sleep_time">$&</a>;g;
+ s;\bsmtpd_etrn_restrictions\b;<a href="postconf.5.html#smtpd_etrn_restrictions">$&</a>;g;
+ s;\bsmtpd_expansion_filter\b;<a href="postconf.5.html#smtpd_expansion_filter">$&</a>;g;
+ s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bid_bare_newline\b;<a href="postconf.5.html#smtpd_forbid_bare_newline">$&</a>;g;
+ s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bid_bare_newline_exclusions\b;<a href="postconf.5.html#smtpd_forbid_bare_newline_exclusions">$&</a>;g;
+ s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bidden_commands\b;<a href="postconf.5.html#smtpd_forbidden_commands">$&</a>;g;
+ s;\bsmtpd_hard_error_limit\b;<a href="postconf.5.html#smtpd_hard_error_limit">$&</a>;g;
+ s;\bsmtpd_helo_required\b;<a href="postconf.5.html#smtpd_helo_required">$&</a>;g;
+--- a/proto/postconf.proto
++++ b/proto/postconf.proto
+@@ -18058,3 +18058,55 @@
+ name or port number. </p>
+
+ <p> This feature is available in Postfix 3.6 and later. </p>
++
++%PARAM smtpd_forbid_bare_newline Postfix &lt; 3.9: no
++
++<p> Reply with "Error: bare &lt;LF&gt; received" and disconnect
++when a remote SMTP client sends a line ending in &lt;LF&gt;, violating
++the RFC 5321 requirement that lines must end in &lt;CR&gt;&lt;LF&gt;.
++This feature is disbled by default with Postfix &lt; 3.9. Use
++smtpd_forbid_bare_newline_exclusions to exclude non-standard clients
++such as netcat. Specify "smtpd_forbid_bare_newline = no" to disable
++(not recommended for an Internet-connected MTA). </p>
++
++<p> See <a href="https://www.postfix.org/smtp-smuggling.html">
++https://www.postfix.org/smtp-smuggling.html</a> for details.
++
++<p> Example: </p>
++
++<blockquote>
++<pre>
++# Disconnect remote SMTP clients that send bare newlines, but allow
++# local clients with non-standard SMTP implementations such as netcat,
++# fax machines, or load balancer health checks.
++#
++smtpd_forbid_bare_newline = yes
++smtpd_forbid_bare_newline_exclusions = $mynetworks
++</pre>
++</blockquote>
++
++<p> This feature is available in Postfix &ge; 3.9, 3.8.4, 3.7.9,
++3.6.13, and 3.5.23. </p>
++
++%PARAM smtpd_forbid_bare_newline_exclusions $mynetworks
++
++<p> Exclude the specified clients from smtpd_forbid_bare_newline
++enforcement. It uses the same syntax and parent-domain matching
++behavior as mynetworks. </p>
++
++<p> Example: </p>
++
++<blockquote>
++<pre>
++# Disconnect remote SMTP clients that send bare newlines, but allow
++# local clients with non-standard SMTP implementations such as netcat,
++# fax machines, or load balancer health checks.
++#
++smtpd_forbid_bare_newline = yes
++smtpd_forbid_bare_newline_exclusions = $mynetworks
++</pre>
++</blockquote>
++
++<p> This feature is available in Postfix &ge; 3.9, 3.8.4, 3.7.9,
++3.6.13, and 3.5.23. </p>
++
+--- a/src/global/mail_params.h
++++ b/src/global/mail_params.h
+@@ -4170,7 +4170,16 @@
+ extern char *var_smtpd_dns_re_filter;
+
+ /*
+- * Share TLS sessions through tlproxy(8).
++ * Backwards compatibility.
++ */
++#define VAR_SMTPD_FORBID_BARE_LF "smtpd_forbid_bare_newline"
++#define DEF_SMTPD_FORBID_BARE_LF 0
++
++#define VAR_SMTPD_FORBID_BARE_LF_EXCL "smtpd_forbid_bare_newline_exclusions"
++#define DEF_SMTPD_FORBID_BARE_LF_EXCL "$" VAR_MYNETWORKS
++
++ /*
++ * Share TLS sessions through tlsproxy(8).
+ */
+ #define VAR_SMTP_TLS_CONN_REUSE "smtp_tls_connection_reuse"
+ #define DEF_SMTP_TLS_CONN_REUSE 0
+--- a/src/global/smtp_stream.c
++++ b/src/global/smtp_stream.c
+@@ -50,6 +50,8 @@
+ /* VSTREAM *stream;
+ /* char *format;
+ /* va_list ap;
++/*
++/* int smtp_forbid_bare_lf;
+ /* AUXILIARY API
+ /* int smtp_get_noexcept(vp, stream, maxlen, flags)
+ /* VSTRING *vp;
+@@ -124,11 +126,16 @@
+ /* smtp_vprintf() is the machine underneath smtp_printf().
+ /*
+ /* smtp_get_noexcept() implements the subset of smtp_get()
+-/* without timeouts and without making long jumps. Instead,
++/* without long jumps for timeout or EOF errors. Instead,
+ /* query the stream status with vstream_feof() etc.
++/* This function will make a VSTREAM long jump (error code
++/* SMTP_ERR_LF) when rejecting input with a bare newline byte.
+ /*
+ /* smtp_timeout_setup() is a backwards-compatibility interface
+ /* for programs that don't require per-record deadline support.
++/*
++/* smtp_forbid_bare_lf controls whether smtp_get_noexcept()
++/* will reject input with a bare newline byte.
+ /* DIAGNOSTICS
+ /* .fi
+ /* .ad
+@@ -201,6 +208,8 @@
+
+ #include "smtp_stream.h"
+
++int smtp_forbid_bare_lf;
++
+ /* smtp_timeout_reset - reset per-stream error flags, restart deadline timer */
+
+ static void smtp_timeout_reset(VSTREAM *stream)
+@@ -404,6 +413,9 @@
+ */
+ case '\n':
+ vstring_truncate(vp, VSTRING_LEN(vp) - 1);
++ if (smtp_forbid_bare_lf
++ && (VSTRING_LEN(vp) == 0 || vstring_end(vp)[-1] != '\r'))
++ vstream_longjmp(stream, SMTP_ERR_LF);
+ while (VSTRING_LEN(vp) > 0 && vstring_end(vp)[-1] == '\r')
+ vstring_truncate(vp, VSTRING_LEN(vp) - 1);
+ VSTRING_TERMINATE(vp);
+--- a/src/global/smtp_stream.h
++++ b/src/global/smtp_stream.h
+@@ -32,6 +32,7 @@
+ #define SMTP_ERR_QUIET 3 /* silent cleanup (application) */
+ #define SMTP_ERR_NONE 4 /* non-error case */
+ #define SMTP_ERR_DATA 5 /* application data error */
++#define SMTP_ERR_LF 6 /* bare <LF> protocol error */
+
+ extern void smtp_stream_setup(VSTREAM *, int, int);
+ extern void PRINTFLIKE(2, 3) smtp_printf(VSTREAM *, const char *,...);
+@@ -43,6 +44,7 @@
+ extern void smtp_fwrite(const char *, ssize_t len, VSTREAM *);
+ extern void smtp_fread_buf(VSTRING *, ssize_t len, VSTREAM *);
+ extern void smtp_fputc(int, VSTREAM *);
++extern int smtp_forbid_bare_lf;
+
+ extern void smtp_vprintf(VSTREAM *, const char *, va_list);
+
+--- a/src/smtpd/smtpd.c
++++ b/src/smtpd/smtpd.c
+@@ -762,6 +762,15 @@
+ /* The maximal number of AUTH commands that any client is allowed to
+ /* send to this service per time unit, regardless of whether or not
+ /* Postfix actually accepts those commands.
++/* .PP
++/* Available in Postfix 3.9, 3.8.4, 3.7.9, 3.6.13, 3.5.23 and later:
++/* .IP "\fBsmtpd_forbid_bare_newline (Postfix < 3.9: no)\fR"
++/* Reply with "Error: bare <LF> received" and disconnect
++/* when a remote SMTP client sends a line ending in <LF>, violating
++/* the RFC 5321 requirement that lines must end in <CR><LF>.
++/* .IP "\fBsmtpd_forbid_bare_newline_exclusions ($mynetworks)\fR"
++/* Exclude the specified clients from smtpd_forbid_bare_newline
++/* enforcement.
+ /* TARPIT CONTROLS
+ /* .ad
+ /* .fi
+@@ -1467,6 +1476,10 @@
+ int var_smtpd_uproxy_tmout;
+ bool var_relay_before_rcpt_checks;
+
++bool var_smtpd_forbid_bare_lf;
++char *var_smtpd_forbid_bare_lf_excl;
++static NAMADR_LIST *bare_lf_excl;
++
+ /*
+ * Silly little macros.
+ */
+@@ -1541,6 +1554,7 @@
+ #define REASON_TIMEOUT "timeout"
+ #define REASON_LOST_CONNECTION "lost connection"
+ #define REASON_ERROR_LIMIT "too many errors"
++#define REASON_BARE_LF "bare <LF> received"
+
+ #ifdef USE_TLS
+
+@@ -3967,6 +3981,7 @@
+ */
+ done = 0;
+ do {
++ int payload_err;
+
+ /*
+ * Do not skip the smtp_fread_buf() call if read_len == 0. We still
+@@ -3980,6 +3995,10 @@
+ smtp_fread_buf(state->buffer, read_len, state->client);
+ state->bdat_get_stream = vstream_memreopen(
+ state->bdat_get_stream, state->buffer, O_RDONLY);
++ vstream_control(state->bdat_get_stream, CA_VSTREAM_CTL_EXCEPT,
++ CA_VSTREAM_CTL_END);
++ if ((payload_err = vstream_setjmp(state->bdat_get_stream)) != 0)
++ vstream_longjmp(state->client, payload_err);
+
+ /*
+ * Read lines from the fragment. The last line may continue in the
+@@ -4655,6 +4674,9 @@
+ */
+ xclient_allowed =
+ namadr_list_match(xclient_hosts, state->name, state->addr);
++ smtp_forbid_bare_lf = SMTPD_STAND_ALONE((state)) == 0
++ && var_smtpd_forbid_bare_lf
++ && !namadr_list_match(bare_lf_excl, state->name, state->addr);
+ /* NOT: tls_reset() */
+ if (got_helo == 0)
+ helo_reset(state);
+@@ -5446,6 +5468,13 @@
+ var_myhostname);
+ break;
+
++ case SMTP_ERR_LF:
++ state->reason = REASON_BARE_LF;
++ if (vstream_setjmp(state->client) == 0)
++ smtpd_chat_reply(state, "521 5.5.2 %s Error: bare <LF> received",
++ var_myhostname);
++ break;
++
+ case 0:
+
+ /*
+@@ -5995,6 +6024,13 @@
+ namadr_list_match(xforward_hosts, state.name, state.addr);
+
+ /*
++ * Enforce strict SMTP line endings, with compatibility exclusions.
++ */
++ smtp_forbid_bare_lf = SMTPD_STAND_ALONE((&state)) == 0
++ && var_smtpd_forbid_bare_lf
++ && !namadr_list_match(bare_lf_excl, state.name, state.addr);
++
++ /*
+ * See if we need to turn on verbose logging for this client.
+ */
+ debug_peer_check(state.name, state.addr);
+@@ -6055,6 +6091,10 @@
+ hogger_list = namadr_list_init(VAR_SMTPD_HOGGERS, MATCH_FLAG_RETURN
+ | match_parent_style(VAR_SMTPD_HOGGERS),
+ var_smtpd_hoggers);
++ bare_lf_excl = namadr_list_init(VAR_SMTPD_FORBID_BARE_LF_EXCL,
++ MATCH_FLAG_RETURN
++ | match_parent_style(VAR_MYNETWORKS),
++ var_smtpd_forbid_bare_lf_excl);
+
+ /*
+ * Open maps before dropping privileges so we can read passwords etc.
+@@ -6412,6 +6452,7 @@
+ VAR_SMTPD_PEERNAME_LOOKUP, DEF_SMTPD_PEERNAME_LOOKUP, &var_smtpd_peername_lookup,
+ VAR_SMTPD_DELAY_OPEN, DEF_SMTPD_DELAY_OPEN, &var_smtpd_delay_open,
+ VAR_SMTPD_CLIENT_PORT_LOG, DEF_SMTPD_CLIENT_PORT_LOG, &var_smtpd_client_port_log,
++ VAR_SMTPD_FORBID_BARE_LF, DEF_SMTPD_FORBID_BARE_LF, &var_smtpd_forbid_bare_lf,
+ 0,
+ };
+ static const CONFIG_NBOOL_TABLE nbool_table[] = {
+@@ -6527,6 +6568,7 @@
+ VAR_SMTPD_POLICY_CONTEXT, DEF_SMTPD_POLICY_CONTEXT, &var_smtpd_policy_context, 0, 0,
+ VAR_SMTPD_DNS_RE_FILTER, DEF_SMTPD_DNS_RE_FILTER, &var_smtpd_dns_re_filter, 0, 0,
+ VAR_SMTPD_REJ_FTR_MAPS, DEF_SMTPD_REJ_FTR_MAPS, &var_smtpd_rej_ftr_maps, 0, 0,
++ VAR_SMTPD_FORBID_BARE_LF_EXCL, DEF_SMTPD_FORBID_BARE_LF_EXCL, &var_smtpd_forbid_bare_lf_excl, 0, 0,
+ 0,
+ };
+ static const CONFIG_RAW_TABLE raw_table[] = {
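
Taken together, this first backport makes the smtp_get_noexcept() line reader raise a new SMTP_ERR_LF exception via vstream_longjmp() whenever a line ends in a bare <LF> and smtp_forbid_bare_lf is set, after which smtpd.c replies "521 5.5.2 ... Error: bare <LF> received" and drops the session unless the client matches smtpd_forbid_bare_newline_exclusions. A minimal sketch of the detection rule itself, outside Postfix's VSTREAM/VSTRING machinery (illustrative names only, not the patched code):

/*
 * Minimal sketch: a line read up to and including '\n' ends in a "bare"
 * newline when the byte before the '\n' is not '\r'.
 */
#include <stdio.h>
#include <string.h>

static int ends_in_bare_lf(const char *line, size_t len)
{
    if (len == 0 || line[len - 1] != '\n')
        return 0;                           /* no newline at all */
    return len < 2 || line[len - 2] != '\r';
}

int main(void)
{
    const char *good = "MAIL FROM:<a@example.com>\r\n";
    const char *bad = "MAIL FROM:<a@example.com>\n";

    printf("CRLF line:    %s\n", ends_in_bare_lf(good, strlen(good)) ?
           "bare <LF> received" : "ok");
    printf("LF-only line: %s\n", ends_in_bare_lf(bad, strlen(bad)) ?
           "bare <LF> received" : "ok");
    return 0;
}
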
diff --git a/meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-2.patch b/meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-2.patch
new file mode 100644
index 0000000000..e97a088557
--- /dev/null
+++ b/meta-networking/recipes-daemons/postfix/files/CVE-2023-51764-2.patch
@@ -0,0 +1,978 @@
+From cb3b1cbda3dec086a7f4541fe64751d9bb2988bd Mon Sep 17 00:00:00 2001
+From: Wietse Venema <wietse@porcupine.org>
+Date: Sun, 21 Jan 2024 00:00:00 -0500
+Subject: [PATCH] postfix-3.6.14
+
+---
+
+Upstream-Status: Backport from [https://launchpad.net/ubuntu/+source/postfix/3.6.4-1ubuntu1.3]
+CVE: CVE-2023-51764
+Signed-off-by: Ashish Sharma <asharma@mvista.com>
+
+ man/man5/postconf.5 | 173 +++++++++++++++++++++++++++++++++++-------
+ man/man8/cleanup.8 | 8 +
+ man/man8/smtpd.8 | 11 +-
+ mantools/postlink | 6 -
+ proto/postconf.proto | 142 +++++++++++++++++++++++++++-------
+ src/cleanup/cleanup.c | 8 +
+ src/cleanup/cleanup_init.c | 2
+ src/cleanup/cleanup_message.c | 17 ++++
+ src/global/cleanup_strerror.c | 1
+ src/global/cleanup_user.h | 6 +
+ src/global/mail_params.h | 9 +-
+ src/global/smtp_stream.c | 34 +++++---
+ src/global/smtp_stream.h | 4
+ src/smtpd/smtpd.c | 114 ++++++++++++++++++++-------
+ src/smtpd/smtpd_check.c | 14 ++-
+ src/smtpd/smtpd_check.h | 1
+ 16 files changed, 443 insertions(+), 107 deletions(-)
+
+--- a/man/man5/postconf.5
++++ b/man/man5/postconf.5
+@@ -845,6 +845,32 @@
+ .fi
+ .ad
+ .ft R
++.SH cleanup_replace_stray_cr_lf (default: yes)
++Replace each stray <CR> or <LF> character in message
++content with a space character, to prevent outbound SMTP smuggling,
++and to make the evaluation of Postfix\-added DKIM or other signatures
++independent from how a remote mail server handles such characters.
++.PP
++SMTP does not allow such characters unless they are part of a
++<CR><LF> sequence, and different mail systems handle
++such stray characters in an implementation\-dependent manner. Stray
++<CR> or <LF> characters could be used for outbound
++SMTP smuggling, where an attacker uses a Postfix server to send
++message content with a non\-standard End\-of\-DATA sequence that
++triggers inbound SMTP smuggling at a remote SMTP server.
++.PP
++The replacement happens before all other content management,
++and before Postfix may add a DKIM etc. signature; if the signature
++were created first, the replacement could invalidate the signature.
++.PP
++In addition to preventing SMTP smuggling, replacing stray
++<CR> or <LF> characters ensures that the result of
++signature validation by later mail system will not depend on how
++that mail system handles those stray characters in an
++implementation\-dependent manner.
++.PP
++This feature is available in Postfix >= 3.9, 3.8.5, 3.7.10,
++3.6.14, and 3.5.24.
+ .SH cleanup_service_name (default: cleanup)
+ The name of the \fBcleanup\fR(8) service. This service rewrites addresses
+ into the standard form, and performs \fBcanonical\fR(5) address mapping
+@@ -10413,60 +10439,153 @@
+ .PP
+ This feature is available in Postfix 2.0 and later.
+ .SH smtpd_forbid_bare_newline (default: Postfix < 3.9: no)
+-Reply with "Error: bare <LF> received" and disconnect
+-when a remote SMTP client sends a line ending in <LF>, violating
+-the RFC 5321 requirement that lines must end in <CR><LF>.
+-This feature is disbled by default with Postfix < 3.9. Use
+-smtpd_forbid_bare_newline_exclusions to exclude non\-standard clients
+-such as netcat. Specify "smtpd_forbid_bare_newline = no" to disable
+-(not recommended for an Internet\-connected MTA).
+-.PP
+-See
+-https://www.postfix.org/smtp\-smuggling.html for details.
++Reject or restrict input lines from an SMTP client that end in
++<LF> instead of the standard <CR><LF>. Such line
++endings are commonly allowed with UNIX\-based SMTP servers, but they
++violate RFC 5321, and allowing such line endings can make a server
++vulnerable to
++SMTP smuggling.
++.PP
++Specify one of the following values (case does not matter):
++.IP "\fBnormalize\fR"
++Require the standard
++End\-of\-DATA sequence <CR><LF>.<CR><LF>.
++Otherwise, allow command or message content lines ending in the
++non\-standard <LF>, and process them as if the client sent the
++standard <CR><LF>.
++.br
++.br
++This maintains compatibility
++with many legitimate SMTP client applications that send a mix of
++standard and non\-standard line endings, but will fail to receive
++email from client implementations that do not terminate DATA content
++with the standard End\-of\-DATA sequence
++<CR><LF>.<CR><LF>.
++.br
++.br
++Such clients
++can be excluded with smtpd_forbid_bare_newline_exclusions.
++.br
++.IP "\fByes\fR"
++Compatibility alias for \fBnormalize\fR.
++.br
++.IP "\fBreject\fR"
++Require the standard End\-of\-DATA
++sequence <CR><LF>.<CR><LF>. Reject a command
++or message content when a line contains bare <LF>, log a "bare
++<LF> received" error, and reply with the SMTP status code in
++$smtpd_forbid_bare_newline_reject_code.
++.br
++.br
++This will reject
++email from SMTP clients that send any non\-standard line endings
++such as web applications, netcat, or load balancer health checks.
++.br
++.br
++This will also reject email from services that use BDAT
++to send MIME text containing a bare newline (RFC 3030 Section 3
++requires canonical MIME format for text message types, defined in
++RFC 2045 Sections 2.7 and 2.8).
++.br
++.br
++Such clients can be
++excluded with smtpd_forbid_bare_newline_exclusions (or, in the case
++of BDAT violations, BDAT can be selectively disabled with
++smtpd_discard_ehlo_keyword_address_maps, or globally disabled with
++smtpd_discard_ehlo_keywords).
++.br
++.IP "\fBno\fR (default)"
++Do not require the standard
++End\-of\-DATA
++sequence <CR><LF>.<CR><LF>. Always process
++a bare <LF> as if the client sent <CR><LF>. This
++option is fully backwards compatible, but is not recommended for
++an Internet\-facing SMTP server, because it is vulnerable to SMTP smuggling.
++.br
++.br
+ .PP
+-Example:
++Recommended settings:
+ .sp
+ .in +4
+ .nf
+ .na
+ .ft C
+-# Disconnect remote SMTP clients that send bare newlines, but allow
+-# local clients with non\-standard SMTP implementations such as netcat,
+-# fax machines, or load balancer health checks.
++# Require the standard End\-of\-DATA sequence <CR><LF>.<CR><LF>.
++# Otherwise, allow bare <LF> and process it as if the client sent
++# <CR><LF>.
+ #
+-smtpd_forbid_bare_newline = yes
++# This maintains compatibility with many legitimate SMTP client
++# applications that send a mix of standard and non\-standard line
++# endings, but will fail to receive email from client implementations
++# that do not terminate DATA content with the standard End\-of\-DATA
++# sequence <CR><LF>.<CR><LF>.
++#
++# Such clients can be allowlisted with smtpd_forbid_bare_newline_exclusions.
++# The example below allowlists SMTP clients in trusted networks.
++#
++smtpd_forbid_bare_newline = normalize
+ smtpd_forbid_bare_newline_exclusions = $mynetworks
+ .fi
+ .ad
+ .ft R
+ .in -4
+ .PP
+-This feature is available in Postfix >= 3.9, 3.8.4, 3.7.9,
+-3.6.13, and 3.5.23.
+-.SH smtpd_forbid_bare_newline_exclusions (default: $mynetworks)
+-Exclude the specified clients from smtpd_forbid_bare_newline
+-enforcement. It uses the same syntax and parent\-domain matching
+-behavior as mynetworks.
+-.PP
+-Example:
++Alternative:
+ .sp
+ .in +4
+ .nf
+ .na
+ .ft C
+-# Disconnect remote SMTP clients that send bare newlines, but allow
+-# local clients with non\-standard SMTP implementations such as netcat,
+-# fax machines, or load balancer health checks.
++# Reject input lines that contain <LF> and log a "bare <LF> received"
++# error. Require that input lines end in <CR><LF>, and require the
++# standard End\-of\-DATA sequence <CR><LF>.<CR><LF>.
++#
++# This will reject email from SMTP clients that send any non\-standard
++# line endings such as web applications, netcat, or load balancer
++# health checks.
+ #
+-smtpd_forbid_bare_newline = yes
++# This will also reject email from services that use BDAT to send
++# MIME text containing a bare newline (RFC 3030 Section 3 requires
++# canonical MIME format for text message types, defined in RFC 2045
++# Sections 2.7 and 2.8).
++#
++# Such clients can be allowlisted with smtpd_forbid_bare_newline_exclusions.
++# The example below allowlists SMTP clients in trusted networks.
++#
++smtpd_forbid_bare_newline = reject
+ smtpd_forbid_bare_newline_exclusions = $mynetworks
++#
++# Alternatively, in the case of BDAT violations, BDAT can be selectively
++# disabled with smtpd_discard_ehlo_keyword_address_maps, or globally
++# disabled with smtpd_discard_ehlo_keywords.
++#
++# smtpd_discard_ehlo_keyword_address_maps = cidr:/path/to/file
++# /path/to/file:
++# 10.0.0.0/24 chunking, silent\-discard
++# smtpd_discard_ehlo_keywords = chunking, silent\-discard
+ .fi
+ .ad
+ .ft R
+ .in -4
+ .PP
++This feature with settings \fByes\fR and \fBno\fR is available
++in Postfix 3.8.4, 3.7.9, 3.6.13, and 3.5.23. Additionally, the
++settings \fBreject\fR, and \fBnormalize\fR are available with
++Postfix >= 3.9, 3.8.5, 3.7.10, 3.6.14, and 3.5.24.
++.SH smtpd_forbid_bare_newline_exclusions (default: $mynetworks)
++Exclude the specified clients from smtpd_forbid_bare_newline
++enforcement. This setting uses the same syntax and parent\-domain
++matching behavior as mynetworks.
++.PP
+ This feature is available in Postfix >= 3.9, 3.8.4, 3.7.9,
+ 3.6.13, and 3.5.23.
++.SH smtpd_forbid_bare_newline_reject_code (default: 550)
++The numerical Postfix SMTP server response code when rejecting a
++request with "smtpd_forbid_bare_newline = reject".
++Specify a 5XX status code (521 to disconnect).
++.PP
++This feature is available in Postfix >= 3.9, 3.8.5, 3.7.10,
++3.6.14, and 3.5.24.
+ .SH smtpd_forbidden_commands (default: CONNECT, GET, POST)
+ List of commands that cause the Postfix SMTP server to immediately
+ terminate the session with a 221 code. This can be used to disconnect
+--- a/man/man8/cleanup.8
++++ b/man/man8/cleanup.8
+@@ -163,6 +163,14 @@
+ .IP "\fBmessage_strip_characters (empty)\fR"
+ The set of characters that Postfix will remove from message
+ content.
++.PP
++Available in Postfix version 3.9, 3.8.5, 3.7.10, 3.6.14,
++3.5.24, and later:
++.IP "\fBcleanup_replace_stray_cr_lf (yes)\fR"
++Replace each stray <CR> or <LF> character in message
++content with a space character, to prevent outbound SMTP smuggling,
++and to make the evaluation of Postfix\-added DKIM or other signatures
++independent from how a remote mail server handles such characters.
+ .SH "BEFORE QUEUE MILTER CONTROLS"
+ .na
+ .nf
+--- a/man/man8/smtpd.8
++++ b/man/man8/smtpd.8
+@@ -811,12 +811,17 @@
+ .PP
+ Available in Postfix 3.9, 3.8.4, 3.7.9, 3.6.13, 3.5.23 and later:
+ .IP "\fBsmtpd_forbid_bare_newline (Postfix < 3.9: no)\fR"
+-Reply with "Error: bare <LF> received" and disconnect
+-when a remote SMTP client sends a line ending in <LF>, violating
+-the RFC 5321 requirement that lines must end in <CR><LF>.
++Reject or restrict input lines from an SMTP client that end in
++<LF> instead of the standard <CR><LF>.
+ .IP "\fBsmtpd_forbid_bare_newline_exclusions ($mynetworks)\fR"
+ Exclude the specified clients from smtpd_forbid_bare_newline
+ enforcement.
++.PP
++Available in Postfix 3.9, 3.8.5, 3.7.10, 3.6.14, 3.5.24 and
++later:
++.IP "\fBsmtpd_forbid_bare_newline_reject_code (550)\fR"
++The numerical Postfix SMTP server response code when rejecting a
++request with "smtpd_forbid_bare_newline = reject".
+ .SH "TARPIT CONTROLS"
+ .na
+ .nf
+--- a/mantools/postlink
++++ b/mantools/postlink
+@@ -547,8 +547,10 @@
+ s;\bsmtpd_error_sleep_time\b;<a href="postconf.5.html#smtpd_error_sleep_time">$&</a>;g;
+ s;\bsmtpd_etrn_restrictions\b;<a href="postconf.5.html#smtpd_etrn_restrictions">$&</a>;g;
+ s;\bsmtpd_expansion_filter\b;<a href="postconf.5.html#smtpd_expansion_filter">$&</a>;g;
+- s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bid_bare_newline\b;<a href="postconf.5.html#smtpd_forbid_bare_newline">$&</a>;g;
+- s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bid_bare_newline_exclusions\b;<a href="postconf.5.html#smtpd_forbid_bare_newline_exclusions">$&</a>;g;
++ s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bid_bare_new[-</bB>]*\n*[ <bB>]*line\b;<a href="postconf.5.html#smtpd_forbid_bare_newline">$&</a>;g;
++ s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bid_bare_new[-</bB>]*\n*[ <bB>]*line_reject_code\b;<a href="postconf.5.html#smtpd_forbid_bare_newline_reject_code">$&</a>;g;
++ s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bid_bare_new[-</bB>]*\n*[ <bB>]*line_exclusions\b;<a href="postconf.5.html#smtpd_forbid_bare_newline_exclusions">$&</a>;g;
++ s;\bcleanup_replace_stray_cr_lf\b;<a href="postconf.5.html#cleanup_replace_stray_cr_lf">$&</a>;g;
+ s;\bsmtpd_for[-</bB>]*\n*[ <bB>]*bidden_commands\b;<a href="postconf.5.html#smtpd_forbidden_commands">$&</a>;g;
+ s;\bsmtpd_hard_error_limit\b;<a href="postconf.5.html#smtpd_hard_error_limit">$&</a>;g;
+ s;\bsmtpd_helo_required\b;<a href="postconf.5.html#smtpd_helo_required">$&</a>;g;
+--- a/proto/postconf.proto
++++ b/proto/postconf.proto
+@@ -18061,52 +18061,138 @@
+
+ %PARAM smtpd_forbid_bare_newline Postfix &lt; 3.9: no
+
+-<p> Reply with "Error: bare &lt;LF&gt; received" and disconnect
+-when a remote SMTP client sends a line ending in &lt;LF&gt;, violating
+-the RFC 5321 requirement that lines must end in &lt;CR&gt;&lt;LF&gt;.
+-This feature is disbled by default with Postfix &lt; 3.9. Use
+-smtpd_forbid_bare_newline_exclusions to exclude non-standard clients
+-such as netcat. Specify "smtpd_forbid_bare_newline = no" to disable
+-(not recommended for an Internet-connected MTA). </p>
++<p> Reject or restrict input lines from an SMTP client that end in
++&lt;LF&gt; instead of the standard &lt;CR&gt;&lt;LF&gt;. Such line
++endings are commonly allowed with UNIX-based SMTP servers, but they
++violate RFC 5321, and allowing such line endings can make a server
++vulnerable to <a href="https://www.postfix.org/smtp-smuggling.html">
++SMTP smuggling</a>. </p>
++
++<p> Specify one of the following values (case does not matter): </p>
++
++<dl compact>
++
++<dt> <b>normalize</b></dt> <dd> Require the standard
++End-of-DATA sequence &lt;CR&gt;&lt;LF&gt;.&lt;CR&gt;&lt;LF&gt;.
++Otherwise, allow command or message content lines ending in the
++non-standard &lt;LF&gt;, and process them as if the client sent the
++standard &lt;CR&gt;&lt;LF&gt;. <br> <br> This maintains compatibility
++with many legitimate SMTP client applications that send a mix of
++standard and non-standard line endings, but will fail to receive
++email from client implementations that do not terminate DATA content
++with the standard End-of-DATA sequence
++&lt;CR&gt;&lt;LF&gt;.&lt;CR&gt;&lt;LF&gt;. <br> <br> Such clients
++can be excluded with smtpd_forbid_bare_newline_exclusions. </dd>
++
++<dt> <b>yes</b> </dt> <dd> Compatibility alias for <b>normalize</b>. </dd>
++
++<dt> <b>reject</b> </dt> <dd> Require the standard End-of-DATA
++sequence &lt;CR&gt;&lt;LF&gt;.&lt;CR&gt;&lt;LF&gt;. Reject a command
++or message content when a line contains bare &lt;LF&gt;, log a "bare
++&lt;LF&gt; received" error, and reply with the SMTP status code in
++$smtpd_forbid_bare_newline_reject_code. <br> <br> This will reject
++email from SMTP clients that send any non-standard line endings
++such as web applications, netcat, or load balancer health checks.
++<br> <br> This will also reject email from services that use BDAT
++to send MIME text containing a bare newline (RFC 3030 Section 3
++requires canonical MIME format for text message types, defined in
++RFC 2045 Sections 2.7 and 2.8). <br> <br> Such clients can be
++excluded with smtpd_forbid_bare_newline_exclusions (or, in the case
++of BDAT violations, BDAT can be selectively disabled with
++smtpd_discard_ehlo_keyword_address_maps, or globally disabled with
++smtpd_discard_ehlo_keywords). </dd>
++
++<dt> <b>no</b> (default)</dt> <dd> Do not require the standard
++End-of-DATA
++sequence &lt;CR&gt;&lt;LF&gt;.&lt;CR&gt;&lt;LF&gt;. Always process
++a bare &lt;LF&gt; as if the client sent &lt;CR&gt;&lt;LF&gt;. This
++option is fully backwards compatible, but is not recommended for
++an Internet-facing SMTP server, because it is vulnerable to <a
++href="https://www.postfix.org/smtp-smuggling.html"> SMTP smuggling</a>.
++</dd>
+
+-<p> See <a href="https://www.postfix.org/smtp-smuggling.html">
+-https://www.postfix.org/smtp-smuggling.html</a> for details.
++</dl>
+
+-<p> Example: </p>
++<p> Recommended settings: </p>
+
+ <blockquote>
+ <pre>
+-# Disconnect remote SMTP clients that send bare newlines, but allow
+-# local clients with non-standard SMTP implementations such as netcat,
+-# fax machines, or load balancer health checks.
++# Require the standard End-of-DATA sequence &lt;CR&gt;&lt;LF&gt;.&lt;CR&gt;&lt;LF&gt;.
++# Otherwise, allow bare &lt;LF&gt; and process it as if the client sent
++# &lt;CR&gt;&lt;LF&gt;.
+ #
+-smtpd_forbid_bare_newline = yes
++# This maintains compatibility with many legitimate SMTP client
++# applications that send a mix of standard and non-standard line
++# endings, but will fail to receive email from client implementations
++# that do not terminate DATA content with the standard End-of-DATA
++# sequence &lt;CR&gt;&lt;LF&gt;.&lt;CR&gt;&lt;LF&gt;.
++#
++# Such clients can be allowlisted with smtpd_forbid_bare_newline_exclusions.
++# The example below allowlists SMTP clients in trusted networks.
++#
++smtpd_forbid_bare_newline = normalize
+ smtpd_forbid_bare_newline_exclusions = $mynetworks
+ </pre>
+ </blockquote>
+
+-<p> This feature is available in Postfix &ge; 3.9, 3.8.4, 3.7.9,
+-3.6.13, and 3.5.23. </p>
+-
+-%PARAM smtpd_forbid_bare_newline_exclusions $mynetworks
+-
+-<p> Exclude the specified clients from smtpd_forbid_bare_newline
+-enforcement. It uses the same syntax and parent-domain matching
+-behavior as mynetworks. </p>
+-
+-<p> Example: </p>
++<p> Alternative: </p>
+
+ <blockquote>
+ <pre>
+-# Disconnect remote SMTP clients that send bare newlines, but allow
+-# local clients with non-standard SMTP implementations such as netcat,
+-# fax machines, or load balancer health checks.
++# Reject input lines that contain &lt;LF&gt; and log a "bare &lt;LF&gt; received"
++# error. Require that input lines end in &lt;CR&gt;&lt;LF&gt;, and require the
++# standard End-of-DATA sequence &lt;CR&gt;&lt;LF&gt;.&lt;CR&gt;&lt;LF&gt;.
++#
++# This will reject email from SMTP clients that send any non-standard
++# line endings such as web applications, netcat, or load balancer
++# health checks.
++#
++# This will also reject email from services that use BDAT to send
++# MIME text containing a bare newline (RFC 3030 Section 3 requires
++# canonical MIME format for text message types, defined in RFC 2045
++# Sections 2.7 and 2.8).
++#
++# Such clients can be allowlisted with smtpd_forbid_bare_newline_exclusions.
++# The example below allowlists SMTP clients in trusted networks.
+ #
+-smtpd_forbid_bare_newline = yes
++smtpd_forbid_bare_newline = reject
+ smtpd_forbid_bare_newline_exclusions = $mynetworks
++#
++# Alternatively, in the case of BDAT violations, BDAT can be selectively
++# disabled with smtpd_discard_ehlo_keyword_address_maps, or globally
++# disabled with smtpd_discard_ehlo_keywords.
++#
++# smtpd_discard_ehlo_keyword_address_maps = cidr:/path/to/file
++# /path/to/file:
++# 10.0.0.0/24 chunking, silent-discard
++# smtpd_discard_ehlo_keywords = chunking, silent-discard
+ </pre>
+ </blockquote>
+
++<p> This feature with settings <b>yes</b> and <b>no</b> is available
++in Postfix 3.8.4, 3.7.9, 3.6.13, and 3.5.23. Additionally, the
++settings <b>reject</b>, and <b>normalize</b> are available with
++Postfix &ge; 3.9, 3.8.5, 3.7.10, 3.6.14, and 3.5.24. </p>
++
++%PARAM smtpd_forbid_bare_newline_exclusions $mynetworks
++
++<p> Exclude the specified clients from smtpd_forbid_bare_newline
++enforcement. This setting uses the same syntax and parent-domain
++matching behavior as mynetworks. </p>
++
+ <p> This feature is available in Postfix &ge; 3.9, 3.8.4, 3.7.9,
+ 3.6.13, and 3.5.23. </p>
+
++%PARAM smtpd_forbid_bare_newline_reject_code 550
++
++<p>
++The numerical Postfix SMTP server response code when rejecting a
++request with "smtpd_forbid_bare_newline = reject".
++Specify a 5XX status code (521 to disconnect).
++</p>
++
++<p> This feature is available in Postfix &ge; 3.9, 3.8.5, 3.7.10,
++3.6.14, and 3.5.24. </p>
++
++%PARAM cleanup_replace_stray_cr_lf yes
++
+--- a/src/cleanup/cleanup.c
++++ b/src/cleanup/cleanup.c
+@@ -145,6 +145,14 @@
+ /* .IP "\fBmessage_strip_characters (empty)\fR"
+ /* The set of characters that Postfix will remove from message
+ /* content.
++/* .PP
++/* Available in Postfix version 3.9, 3.8.5, 3.7.10, 3.6.14,
++/* 3.5.24, and later:
++/* .IP "\fBcleanup_replace_stray_cr_lf (yes)\fR"
++/* Replace each stray <CR> or <LF> character in message
++/* content with a space character, to prevent outbound SMTP smuggling,
++/* and to make the evaluation of Postfix-added DKIM or other signatures
++/* independent from how a remote mail server handles such characters.
+ /* BEFORE QUEUE MILTER CONTROLS
+ /* .ad
+ /* .fi
+--- a/src/cleanup/cleanup_init.c
++++ b/src/cleanup/cleanup_init.c
+@@ -173,6 +173,7 @@
+ int var_always_add_hdrs; /* always add missing headers */
+ int var_virt_addrlen_limit; /* stop exponential growth */
+ char *var_hfrom_format; /* header_from_format */
++int var_cleanup_mask_stray_cr_lf; /* replace stray CR or LF with space */
+
+ const CONFIG_INT_TABLE cleanup_int_table[] = {
+ VAR_HOPCOUNT_LIMIT, DEF_HOPCOUNT_LIMIT, &var_hopcount_limit, 1, 0,
+@@ -189,6 +190,7 @@
+ VAR_VERP_BOUNCE_OFF, DEF_VERP_BOUNCE_OFF, &var_verp_bounce_off,
+ VAR_AUTO_8BIT_ENC_HDR, DEF_AUTO_8BIT_ENC_HDR, &var_auto_8bit_enc_hdr,
+ VAR_ALWAYS_ADD_HDRS, DEF_ALWAYS_ADD_HDRS, &var_always_add_hdrs,
++ VAR_CLEANUP_MASK_STRAY_CR_LF, DEF_CLEANUP_MASK_STRAY_CR_LF, &var_cleanup_mask_stray_cr_lf,
+ 0,
+ };
+
+--- a/src/cleanup/cleanup_message.c
++++ b/src/cleanup/cleanup_message.c
+@@ -930,6 +930,23 @@
+ char *dst;
+
+ /*
++ * Replace each stray CR or LF with one space. These are not allowed in
++ * SMTP, and can be used to enable outbound (remote) SMTP smuggling.
++ * Replacing these early ensures that our later DKIM etc. signature will
++ * not be invalidated. Besides preventing SMTP smuggling, replacing stray
++ * <CR> or <LF> ensures that the result of signature validation by a
++ * later mail system will not depend on how that mail system handles
++ * those stray characters in an implementation-dependent manner.
++ *
++ * The input length is not changed, therefore it is safe to overwrite the
++ * input.
++ */
++ if (var_cleanup_mask_stray_cr_lf)
++ for (dst = (char *) buf; dst < buf + len; dst++)
++ if (*dst == '\r' || *dst == '\n')
++ *dst = ' ';
++
++ /*
+ * Reject unwanted characters.
+ *
+ * XXX Possible optimization: simplify the loop when the "reject" set
+--- a/src/global/cleanup_strerror.c
++++ b/src/global/cleanup_strerror.c
+@@ -73,6 +73,7 @@
+ CLEANUP_STAT_CONT, 550, "5.7.1", "message content rejected",
+ CLEANUP_STAT_WRITE, 451, "4.3.0", "queue file write error",
+ CLEANUP_STAT_NOPERM, 550, "5.7.1", "service denied",
++ CLEANUP_STAT_BARE_LF, 521, "5.5.2", "bare <LF> received",
+ };
+
+ static CLEANUP_STAT_DETAIL cleanup_stat_success = {
+--- a/src/global/cleanup_user.h
++++ b/src/global/cleanup_user.h
+@@ -65,6 +65,12 @@
+ #define CLEANUP_STAT_NOPERM (1<<9) /* Denied by non-content policy */
+
+ /*
++ * Non-cleanup errors that live in the same bitmask space, to centralize
++ * error handling.
++ */
++#define CLEANUP_STAT_BARE_LF (1<<16) /* Bare <LF> received */
++
++ /*
+ * These are set when we can't bounce even if we were asked to.
+ */
+ #define CLEANUP_STAT_MASK_CANT_BOUNCE \
+--- a/src/global/mail_params.h
++++ b/src/global/mail_params.h
+@@ -4173,11 +4173,18 @@
+ * Backwards compatibility.
+ */
+ #define VAR_SMTPD_FORBID_BARE_LF "smtpd_forbid_bare_newline"
+-#define DEF_SMTPD_FORBID_BARE_LF 0
++#define DEF_SMTPD_FORBID_BARE_LF "no"
+
+ #define VAR_SMTPD_FORBID_BARE_LF_EXCL "smtpd_forbid_bare_newline_exclusions"
+ #define DEF_SMTPD_FORBID_BARE_LF_EXCL "$" VAR_MYNETWORKS
+
++#define VAR_SMTPD_FORBID_BARE_LF_CODE "smtpd_forbid_bare_newline_reject_code"
++#define DEF_SMTPD_FORBID_BARE_LF_CODE 550
++
++#define VAR_CLEANUP_MASK_STRAY_CR_LF "cleanup_replace_stray_cr_lf"
++#define DEF_CLEANUP_MASK_STRAY_CR_LF 1
++extern int var_cleanup_mask_stray_cr_lf;
++
+ /*
+ * Share TLS sessions through tlsproxy(8).
+ */
+--- a/src/global/smtp_stream.c
++++ b/src/global/smtp_stream.c
+@@ -51,7 +51,8 @@
+ /* char *format;
+ /* va_list ap;
+ /*
+-/* int smtp_forbid_bare_lf;
++/* int smtp_detect_bare_lf;
++/* int smtp_got_bare_lf;
+ /* AUXILIARY API
+ /* int smtp_get_noexcept(vp, stream, maxlen, flags)
+ /* VSTRING *vp;
+@@ -126,16 +127,16 @@
+ /* smtp_vprintf() is the machine underneath smtp_printf().
+ /*
+ /* smtp_get_noexcept() implements the subset of smtp_get()
+-/* without long jumps for timeout or EOF errors. Instead,
++/* without timeouts and without making long jumps. Instead,
+ /* query the stream status with vstream_feof() etc.
+-/* This function will make a VSTREAM long jump (error code
+-/* SMTP_ERR_LF) when rejecting input with a bare newline byte.
++/*
++/* This function assigns smtp_got_bare_lf = smtp_detect_bare_lf,
++/* if smtp_detect_bare_lf is non-zero and the last read line
++/* was terminated with a bare newline. Otherwise, this function
++/* sets smtp_got_bare_lf to zero.
+ /*
+ /* smtp_timeout_setup() is a backwards-compatibility interface
+ /* for programs that don't require per-record deadline support.
+-/*
+-/* smtp_forbid_bare_lf controls whether smtp_get_noexcept()
+-/* will reject input with a bare newline byte.
+ /* DIAGNOSTICS
+ /* .fi
+ /* .ad
+@@ -208,7 +209,8 @@
+
+ #include "smtp_stream.h"
+
+-int smtp_forbid_bare_lf;
++int smtp_detect_bare_lf;
++int smtp_got_bare_lf;
+
+ /* smtp_timeout_reset - reset per-stream error flags, restart deadline timer */
+
+@@ -371,6 +373,8 @@
+ int last_char;
+ int next_char;
+
++ smtp_got_bare_lf = 0;
++
+ /*
+ * It's painful to do I/O with records that may span multiple buffers.
+ * Allow for partial long lines (we will read the remainder later) and
+@@ -413,11 +417,15 @@
+ */
+ case '\n':
+ vstring_truncate(vp, VSTRING_LEN(vp) - 1);
+- if (smtp_forbid_bare_lf
+- && (VSTRING_LEN(vp) == 0 || vstring_end(vp)[-1] != '\r'))
+- vstream_longjmp(stream, SMTP_ERR_LF);
+- while (VSTRING_LEN(vp) > 0 && vstring_end(vp)[-1] == '\r')
+- vstring_truncate(vp, VSTRING_LEN(vp) - 1);
++ if (smtp_detect_bare_lf) {
++ if (VSTRING_LEN(vp) == 0 || vstring_end(vp)[-1] != '\r')
++ smtp_got_bare_lf = smtp_detect_bare_lf;
++ else
++ vstring_truncate(vp, VSTRING_LEN(vp) - 1);
++ } else {
++ while (VSTRING_LEN(vp) > 0 && vstring_end(vp)[-1] == '\r')
++ vstring_truncate(vp, VSTRING_LEN(vp) - 1);
++ }
+ VSTRING_TERMINATE(vp);
+ /* FALLTRHOUGH */
+
+--- a/src/global/smtp_stream.h
++++ b/src/global/smtp_stream.h
+@@ -32,7 +32,6 @@
+ #define SMTP_ERR_QUIET 3 /* silent cleanup (application) */
+ #define SMTP_ERR_NONE 4 /* non-error case */
+ #define SMTP_ERR_DATA 5 /* application data error */
+-#define SMTP_ERR_LF 6 /* bare <LF> protocol error */
+
+ extern void smtp_stream_setup(VSTREAM *, int, int);
+ extern void PRINTFLIKE(2, 3) smtp_printf(VSTREAM *, const char *,...);
+@@ -44,7 +43,8 @@
+ extern void smtp_fwrite(const char *, ssize_t len, VSTREAM *);
+ extern void smtp_fread_buf(VSTRING *, ssize_t len, VSTREAM *);
+ extern void smtp_fputc(int, VSTREAM *);
+-extern int smtp_forbid_bare_lf;
++extern int smtp_detect_bare_lf;
++extern int smtp_got_bare_lf;
+
+ extern void smtp_vprintf(VSTREAM *, const char *, va_list);
+
+--- a/src/smtpd/smtpd.c
++++ b/src/smtpd/smtpd.c
+@@ -765,12 +765,17 @@
+ /* .PP
+ /* Available in Postfix 3.9, 3.8.4, 3.7.9, 3.6.13, 3.5.23 and later:
+ /* .IP "\fBsmtpd_forbid_bare_newline (Postfix < 3.9: no)\fR"
+-/* Reply with "Error: bare <LF> received" and disconnect
+-/* when a remote SMTP client sends a line ending in <LF>, violating
+-/* the RFC 5321 requirement that lines must end in <CR><LF>.
++/* Reject or restrict input lines from an SMTP client that end in
++/* <LF> instead of the standard <CR><LF>.
+ /* .IP "\fBsmtpd_forbid_bare_newline_exclusions ($mynetworks)\fR"
+ /* Exclude the specified clients from smtpd_forbid_bare_newline
+ /* enforcement.
++/* .PP
++/* Available in Postfix 3.9, 3.8.5, 3.7.10, 3.6.14, 3.5.24 and
++/* later:
++/* .IP "\fBsmtpd_forbid_bare_newline_reject_code (550)\fR"
++/* The numerical Postfix SMTP server response code when rejecting a
++/* request with "smtpd_forbid_bare_newline = reject".
+ /* TARPIT CONTROLS
+ /* .ad
+ /* .fi
+@@ -1476,8 +1481,10 @@
+ int var_smtpd_uproxy_tmout;
+ bool var_relay_before_rcpt_checks;
+
+-bool var_smtpd_forbid_bare_lf;
++char *var_smtpd_forbid_bare_lf;
+ char *var_smtpd_forbid_bare_lf_excl;
++int var_smtpd_forbid_bare_lf_code;
++static int bare_lf_mask;
+ static NAMADR_LIST *bare_lf_excl;
+
+ /*
+@@ -1554,7 +1561,6 @@
+ #define REASON_TIMEOUT "timeout"
+ #define REASON_LOST_CONNECTION "lost connection"
+ #define REASON_ERROR_LIMIT "too many errors"
+-#define REASON_BARE_LF "bare <LF> received"
+
+ #ifdef USE_TLS
+
+@@ -1573,6 +1579,40 @@
+ */
+ static DICT *smtpd_cmd_filter;
+
++ /*
++ * Bare LF and End-of-DATA controls (bare CR is handled elsewhere).
++ *
++ * At the smtp_get*() line reader level, setting any of these flags in the
++ * smtp_detect_bare_lf variable enables the detection of bare newlines. The
++ * line reader will set the same flags in the smtp_got_bare_lf variable
++ * after it detects a bare newline, otherwise it clears smtp_got_bare_lf.
++ *
++ * At the SMTP command level, the flags in smtp_got_bare_lf control whether
++ * commands ending in a bare newline are rejected.
++ *
++ * At the DATA and BDAT content level, the flags in smtp_got_bare_lf control
++ * whether the standard End-of-DATA sequence CRLF.CRLF is required, and
++ * whether lines ending in bare newlines are rejected.
++ *
++ * Postfix implements "delayed reject" after detecting a bare newline in BDAT
++ * or DATA content. The SMTP server delays a REJECT response until the
++ * command is finished, instead of replying and hanging up immediately. The
++ * End-of-DATA detection is secured with BARE_LF_FLAG_WANT_STD_EOD.
++ */
++#define BARE_LF_FLAG_WANT_STD_EOD (1<<0) /* Require CRLF.CRLF */
++#define BARE_LF_FLAG_REPLY_REJECT (1<<1) /* Reject bare newline */
++
++#define IS_BARE_LF_WANT_STD_EOD(m) ((m) & BARE_LF_FLAG_WANT_STD_EOD)
++#define IS_BARE_LF_REPLY_REJECT(m) ((m) & BARE_LF_FLAG_REPLY_REJECT)
++
++static const NAME_CODE bare_lf_mask_table[] = {
++ "normalize", BARE_LF_FLAG_WANT_STD_EOD, /* Default */
++ "yes", BARE_LF_FLAG_WANT_STD_EOD, /* Migration aid */
++ "reject", BARE_LF_FLAG_WANT_STD_EOD | BARE_LF_FLAG_REPLY_REJECT,
++ "no", 0,
++ 0, -1, /* error */
++};
++
+ #ifdef USE_SASL_AUTH
+
+ /*
+@@ -3515,6 +3555,7 @@
+ int curr_rec_type;
+ int prev_rec_type;
+ int first = 1;
++ int prev_got_bare_lf = 0;
+
+ /*
+ * Copy the message content. If the cleanup process has a problem, keep
+@@ -3528,12 +3569,15 @@
+ * XXX Deal with UNIX-style From_ lines at the start of message content
+ * because sendmail permits it.
+ */
+- for (prev_rec_type = 0; /* void */ ; prev_rec_type = curr_rec_type) {
++ for (prev_rec_type = 0; /* void */ ; prev_rec_type = curr_rec_type,
++ prev_got_bare_lf = smtp_got_bare_lf) {
+ if (smtp_get(state->buffer, state->client, var_line_limit,
+ SMTP_GET_FLAG_NONE) == '\n')
+ curr_rec_type = REC_TYPE_NORM;
+ else
+ curr_rec_type = REC_TYPE_CONT;
++ if (IS_BARE_LF_REPLY_REJECT(smtp_got_bare_lf))
++ state->err |= CLEANUP_STAT_BARE_LF;
+ start = vstring_str(state->buffer);
+ len = VSTRING_LEN(state->buffer);
+ if (first) {
+@@ -3546,9 +3590,14 @@
+ if (len > 0 && IS_SPACE_TAB(start[0]))
+ out_record(out_stream, REC_TYPE_NORM, "", 0);
+ }
+- if (prev_rec_type != REC_TYPE_CONT && *start == '.'
+- && (proxy == 0 ? (++start, --len) == 0 : len == 1))
+- break;
++ if (prev_rec_type != REC_TYPE_CONT && *start == '.') {
++ if (len == 1 && IS_BARE_LF_WANT_STD_EOD(smtp_detect_bare_lf)
++ && (smtp_got_bare_lf || prev_got_bare_lf))
++ /* Do not store or send to proxy filter. */
++ continue;
++ if (proxy == 0 ? (++start, --len) == 0 : len == 1)
++ break;
++ }
+ if (state->err == CLEANUP_STAT_OK) {
+ if (ENFORCING_SIZE_LIMIT(var_message_limit)
+ && var_message_limit - state->act_size < len + 2) {
+@@ -3701,6 +3750,11 @@
+ else
+ smtpd_chat_reply(state,
+ "250 2.0.0 Ok: queued as %s", state->queue_id);
++ } else if ((state->err & CLEANUP_STAT_BARE_LF) != 0) {
++ state->error_mask |= MAIL_ERROR_PROTOCOL;
++ log_whatsup(state, "reject", "bare <LF> received");
++ smtpd_chat_reply(state, "%d 5.5.2 %s Error: bare <LF> received",
++ var_smtpd_forbid_bare_lf_code, var_myhostname);
+ } else if (why && IS_SMTP_REJECT(STR(why))) {
+ state->error_mask |= MAIL_ERROR_POLICY;
+ smtpd_chat_reply(state, "%s", STR(why));
+@@ -3981,7 +4035,6 @@
+ */
+ done = 0;
+ do {
+- int payload_err;
+
+ /*
+ * Do not skip the smtp_fread_buf() call if read_len == 0. We still
+@@ -3995,10 +4048,6 @@
+ smtp_fread_buf(state->buffer, read_len, state->client);
+ state->bdat_get_stream = vstream_memreopen(
+ state->bdat_get_stream, state->buffer, O_RDONLY);
+- vstream_control(state->bdat_get_stream, CA_VSTREAM_CTL_EXCEPT,
+- CA_VSTREAM_CTL_END);
+- if ((payload_err = vstream_setjmp(state->bdat_get_stream)) != 0)
+- vstream_longjmp(state->client, payload_err);
+
+ /*
+ * Read lines from the fragment. The last line may continue in the
+@@ -4023,6 +4072,8 @@
+ /* Skip the out_record() and VSTRING_RESET() calls below. */
+ break;
+ }
++ if (IS_BARE_LF_REPLY_REJECT(smtp_got_bare_lf))
++ state->err |= CLEANUP_STAT_BARE_LF;
+ start = vstring_str(state->bdat_get_buffer);
+ len = VSTRING_LEN(state->bdat_get_buffer);
+ if (state->err == CLEANUP_STAT_OK) {
+@@ -4674,9 +4725,9 @@
+ */
+ xclient_allowed =
+ namadr_list_match(xclient_hosts, state->name, state->addr);
+- smtp_forbid_bare_lf = SMTPD_STAND_ALONE((state)) == 0
+- && var_smtpd_forbid_bare_lf
+- && !namadr_list_match(bare_lf_excl, state->name, state->addr);
++ smtp_detect_bare_lf = (SMTPD_STAND_ALONE((state)) == 0 && bare_lf_mask
++ && !namadr_list_match(bare_lf_excl, state->name, state->addr)) ?
++ bare_lf_mask : 0;
+ /* NOT: tls_reset() */
+ if (got_helo == 0)
+ helo_reset(state);
+@@ -5468,13 +5519,6 @@
+ var_myhostname);
+ break;
+
+- case SMTP_ERR_LF:
+- state->reason = REASON_BARE_LF;
+- if (vstream_setjmp(state->client) == 0)
+- smtpd_chat_reply(state, "521 5.5.2 %s Error: bare <LF> received",
+- var_myhostname);
+- break;
+-
+ case 0:
+
+ /*
+@@ -5676,6 +5720,13 @@
+ }
+ watchdog_pat();
+ smtpd_chat_query(state);
++ if (IS_BARE_LF_REPLY_REJECT(smtp_got_bare_lf)) {
++ log_whatsup(state, "reject", "bare <LF> received");
++ state->error_mask |= MAIL_ERROR_PROTOCOL;
++ smtpd_chat_reply(state, "%d 5.5.2 %s Error: bare <LF> received",
++ var_smtpd_forbid_bare_lf_code, var_myhostname);
++ break;
++ }
+ /* Safety: protect internal interfaces against malformed UTF-8. */
+ if (var_smtputf8_enable && valid_utf8_string(STR(state->buffer),
+ LEN(state->buffer)) == 0) {
+@@ -6024,11 +6075,11 @@
+ namadr_list_match(xforward_hosts, state.name, state.addr);
+
+ /*
+- * Enforce strict SMTP line endings, with compatibility exclusions.
++ * Reject or normalize bare LF, with compatibility exclusions.
+ */
+- smtp_forbid_bare_lf = SMTPD_STAND_ALONE((&state)) == 0
+- && var_smtpd_forbid_bare_lf
+- && !namadr_list_match(bare_lf_excl, state.name, state.addr);
++ smtp_detect_bare_lf = (SMTPD_STAND_ALONE((&state)) == 0 && bare_lf_mask
++ && !namadr_list_match(bare_lf_excl, state.name, state.addr)) ?
++ bare_lf_mask : 0;
+
+ /*
+ * See if we need to turn on verbose logging for this client.
+@@ -6095,6 +6146,10 @@
+ MATCH_FLAG_RETURN
+ | match_parent_style(VAR_MYNETWORKS),
+ var_smtpd_forbid_bare_lf_excl);
++ if ((bare_lf_mask = name_code(bare_lf_mask_table, NAME_CODE_FLAG_NONE,
++ var_smtpd_forbid_bare_lf)) < 0)
++ msg_fatal("bad parameter value: '%s = %s'",
++ VAR_SMTPD_FORBID_BARE_LF, var_smtpd_forbid_bare_lf);
+
+ /*
+ * Open maps before dropping privileges so we can read passwords etc.
+@@ -6390,6 +6445,7 @@
+ VAR_VIRT_MAILBOX_CODE, DEF_VIRT_MAILBOX_CODE, &var_virt_mailbox_code, 0, 0,
+ VAR_RELAY_RCPT_CODE, DEF_RELAY_RCPT_CODE, &var_relay_rcpt_code, 0, 0,
+ VAR_PLAINTEXT_CODE, DEF_PLAINTEXT_CODE, &var_plaintext_code, 0, 0,
++ VAR_SMTPD_FORBID_BARE_LF_CODE, DEF_SMTPD_FORBID_BARE_LF_CODE, &var_smtpd_forbid_bare_lf_code, 500, 599,
+ VAR_SMTPD_CRATE_LIMIT, DEF_SMTPD_CRATE_LIMIT, &var_smtpd_crate_limit, 0, 0,
+ VAR_SMTPD_CCONN_LIMIT, DEF_SMTPD_CCONN_LIMIT, &var_smtpd_cconn_limit, 0, 0,
+ VAR_SMTPD_CMAIL_LIMIT, DEF_SMTPD_CMAIL_LIMIT, &var_smtpd_cmail_limit, 0, 0,
+@@ -6452,7 +6508,6 @@
+ VAR_SMTPD_PEERNAME_LOOKUP, DEF_SMTPD_PEERNAME_LOOKUP, &var_smtpd_peername_lookup,
+ VAR_SMTPD_DELAY_OPEN, DEF_SMTPD_DELAY_OPEN, &var_smtpd_delay_open,
+ VAR_SMTPD_CLIENT_PORT_LOG, DEF_SMTPD_CLIENT_PORT_LOG, &var_smtpd_client_port_log,
+- VAR_SMTPD_FORBID_BARE_LF, DEF_SMTPD_FORBID_BARE_LF, &var_smtpd_forbid_bare_lf,
+ 0,
+ };
+ static const CONFIG_NBOOL_TABLE nbool_table[] = {
+@@ -6569,6 +6624,7 @@
+ VAR_SMTPD_DNS_RE_FILTER, DEF_SMTPD_DNS_RE_FILTER, &var_smtpd_dns_re_filter, 0, 0,
+ VAR_SMTPD_REJ_FTR_MAPS, DEF_SMTPD_REJ_FTR_MAPS, &var_smtpd_rej_ftr_maps, 0, 0,
+ VAR_SMTPD_FORBID_BARE_LF_EXCL, DEF_SMTPD_FORBID_BARE_LF_EXCL, &var_smtpd_forbid_bare_lf_excl, 0, 0,
++ VAR_SMTPD_FORBID_BARE_LF, DEF_SMTPD_FORBID_BARE_LF, &var_smtpd_forbid_bare_lf, 1, 0,
+ 0,
+ };
+ static const CONFIG_RAW_TABLE raw_table[] = {
+--- a/src/smtpd/smtpd_check.c
++++ b/src/smtpd/smtpd_check.c
+@@ -48,6 +48,11 @@
+ /*
+ /* char *smtpd_check_queue(state)
+ /* SMTPD_STATE *state;
++/* AUXILIARY FUNCTIONS
++/* void log_whatsup(state, action, text)
++/* SMTPD_STATE *state;
++/* const char *action;
++/* const char *text;
+ /* DESCRIPTION
+ /* This module implements additional checks on SMTP client requests.
+ /* A client request is validated in the context of the session state.
+@@ -146,6 +151,11 @@
+ /* The recipient address given with the RCPT TO or VRFY command.
+ /* .IP size
+ /* The message size given with the MAIL FROM command (zero if unknown).
++/* .PP
++/* log_whatsup() logs "<queueid>: <action>: <protocol state>
++/* from: <client-name[client-addr]>: <text>" plus the protocol
++/* (SMTP or ESMTP), and if available, EHLO, MAIL FROM, or RCPT
++/* TO.
+ /* BUGS
+ /* Policies like these should not be hard-coded in C, but should
+ /* be user-programmable instead.
+@@ -987,8 +997,8 @@
+
+ /* log_whatsup - log as much context as we have */
+
+-static void log_whatsup(SMTPD_STATE *state, const char *whatsup,
+- const char *text)
++void log_whatsup(SMTPD_STATE *state, const char *whatsup,
++ const char *text)
+ {
+ VSTRING *buf = vstring_alloc(100);
+
+--- a/src/smtpd/smtpd_check.h
++++ b/src/smtpd/smtpd_check.h
+@@ -25,6 +25,7 @@
+ extern char *smtpd_check_data(SMTPD_STATE *);
+ extern char *smtpd_check_eod(SMTPD_STATE *);
+ extern char *smtpd_check_policy(SMTPD_STATE *, char *);
++extern void log_whatsup(SMTPD_STATE *, const char *, const char *);
+
+ /* LICENSE
+ /* .ad
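
The second backport replaces the unconditional 521 disconnect with a configurable policy: smtpd_forbid_bare_newline now accepts normalize, yes, reject, or no, mapped through a NAME_CODE table onto the BARE_LF_FLAG_WANT_STD_EOD and BARE_LF_FLAG_REPLY_REJECT bits, with the reply code taken from smtpd_forbid_bare_newline_reject_code; it also adds cleanup_replace_stray_cr_lf, which rewrites stray <CR> or <LF> bytes in message content to spaces before any DKIM signing. A rough sketch of both mechanisms follows, with hypothetical names rather than the Postfix implementation (the buffer below stands for one already-delimited record, so any CR or LF left in it is stray):

/*
 * Sketch, illustrative names only: keyword-to-bitmask lookup in the style
 * of the patch's bare_lf_mask_table, plus in-place masking of stray CR/LF.
 */
#include <stdio.h>
#include <string.h>

#define FLAG_WANT_STD_EOD (1 << 0)   /* require CRLF.CRLF as End-of-DATA */
#define FLAG_REPLY_REJECT (1 << 1)   /* reject lines containing bare LF */

static int bare_lf_mask_from_setting(const char *setting)
{
    static const struct { const char *name; int mask; } table[] = {
        { "normalize", FLAG_WANT_STD_EOD },
        { "yes",       FLAG_WANT_STD_EOD },                     /* alias */
        { "reject",    FLAG_WANT_STD_EOD | FLAG_REPLY_REJECT },
        { "no",        0 },
    };
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (strcmp(setting, table[i].name) == 0)
            return table[i].mask;
    return -1;                          /* unknown value: configuration error */
}

/* Replace each stray CR or LF in a record buffer with a space, in place. */
static void mask_stray_cr_lf(char *buf, size_t len)
{
    for (size_t i = 0; i < len; i++)
        if (buf[i] == '\r' || buf[i] == '\n')
            buf[i] = ' ';
}

int main(void)
{
    char content[] = "line one\rline two\n";

    printf("reject -> mask 0x%x\n", bare_lf_mask_from_setting("reject"));
    mask_stray_cr_lf(content, strlen(content));
    printf("masked record: \"%s\"\n", content);
    return 0;
}
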
diff --git a/meta-networking/recipes-daemons/postfix/postfix_3.6.7.bb b/meta-networking/recipes-daemons/postfix/postfix_3.6.7.bb
index e91e677790..fdda2e749e 100644
--- a/meta-networking/recipes-daemons/postfix/postfix_3.6.7.bb
+++ b/meta-networking/recipes-daemons/postfix/postfix_3.6.7.bb
@@ -12,6 +12,9 @@ SRC_URI += "ftp://ftp.porcupine.org/mirrors/postfix-release/official/postfix-${P
file://0003-makedefs-Use-native-compiler-to-build-makedefs.test.patch \
file://0004-Fix-icu-config.patch \
file://0005-makedefs-add-lnsl-and-lresolv-to-SYSLIBS-by-default.patch \
+ file://0006-makedefs-Account-for-linux-6.x-version.patch \
+ file://CVE-2023-51764-1.patch \
+ file://CVE-2023-51764-2.patch \
"
SRC_URI[sha256sum] = "e471df7e0eb11c4a1e574b6d7298f635386e2843b6b3584c25a04543d587e07f"
UPSTREAM_CHECK_REGEX = "postfix\-(?P<pver>3\.6(\.\d+)+).tar.gz"
diff --git a/meta-networking/recipes-daemons/proftpd/files/CVE-2023-51713.patch b/meta-networking/recipes-daemons/proftpd/files/CVE-2023-51713.patch
new file mode 100644
index 0000000000..4b2cac1870
--- /dev/null
+++ b/meta-networking/recipes-daemons/proftpd/files/CVE-2023-51713.patch
@@ -0,0 +1,277 @@
+From 97bbe68363ccf2de0c07f67170ec64a8b4d62592 Mon Sep 17 00:00:00 2001
+From: TJ Saunders <tj@castaglia.org>
+Date: Sun, 6 Aug 2023 13:16:26 -0700
+Subject: [PATCH] Issue #1683: Avoid an edge case when handling unexpectedly
+ formatted input text from client, caused by quote/backslash semantics, by
+ skipping those semantics.
+
+Upstream-Status: Backport [https://github.com/proftpd/proftpd/commit/97bbe68363ccf2de0c07f67170ec64a8b4d62592]
+CVE: CVE-2023-51713
+Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
+---
+ include/str.h | 3 ++-
+ src/main.c | 34 +++++++++++++++++++++++++++++----
+ src/str.c | 22 +++++++++++++---------
+ tests/api/str.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++-
+ 4 files changed, 94 insertions(+), 15 deletions(-)
+
+diff --git a/include/str.h b/include/str.h
+index f08398017..1261ae2c2 100644
+--- a/include/str.h
++++ b/include/str.h
+@@ -1,6 +1,6 @@
+ /*
+ * ProFTPD - FTP server daemon
+- * Copyright (c) 2008-2020 The ProFTPD Project team
++ * Copyright (c) 2008-2023 The ProFTPD Project team
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -131,6 +131,7 @@ const char *pr_gid2str(pool *, gid_t);
+ #define PR_STR_FL_PRESERVE_COMMENTS 0x0001
+ #define PR_STR_FL_PRESERVE_WHITESPACE 0x0002
+ #define PR_STR_FL_IGNORE_CASE 0x0004
++#define PR_STR_FL_IGNORE_QUOTES 0x0008
+
+ char *pr_str_get_token(char **, char *);
+ char *pr_str_get_token2(char **, char *, size_t *);
+diff --git a/src/main.c b/src/main.c
+index ee9c1eecb..e6b70731d 100644
+--- a/src/main.c
++++ b/src/main.c
+@@ -811,8 +811,24 @@ static cmd_rec *make_ftp_cmd(pool *p, char *buf, size_t buflen, int flags) {
+ return NULL;
+ }
+
++ /* By default, pr_str_get_word will handle quotes and backslashes for
++ * escaping characters. This can produce words which are shorter, i.e.
++ * use fewer bytes, than the corresponding input buffer.
++ *
++ * In this particular situation, we use the length of this initial word
++ * for determining the length of the remaining buffer bytes, assumed to
++ * contain the FTP command arguments. If this initial word is thus
++ * unexpectedly "shorter", due to nonconformant FTP text, it can lead
++ * the subsequent buffer scan, looking for CRNUL sequences, to access
++ * unexpected memory addresses (Issue #1683).
++ *
++ * Thus for this particular situation, we tell the function to ignore/skip
++ * such quote/backslash semantics, and treat them as any other character
++ * using the IGNORE_QUOTES flag.
++ */
++
+ ptr = buf;
+- wrd = pr_str_get_word(&ptr, str_flags);
++ wrd = pr_str_get_word(&ptr, str_flags|PR_STR_FL_IGNORE_QUOTES);
+ if (wrd == NULL) {
+ /* Nothing there...bail out. */
+ pr_trace_msg("ctrl", 5, "command '%s' is empty, ignoring", buf);
+@@ -820,6 +836,11 @@ static cmd_rec *make_ftp_cmd(pool *p, char *buf, size_t buflen, int flags) {
+ return NULL;
+ }
+
++ /* Note that this first word is the FTP command. This is why we make
++ * use of the ptr buffer, which advances through the input buffer as
++ * we read words from the buffer.
++ */
++
+ subpool = make_sub_pool(p);
+ pr_pool_tag(subpool, "make_ftp_cmd pool");
+ cmd = pcalloc(subpool, sizeof(cmd_rec));
+@@ -846,6 +867,7 @@ static cmd_rec *make_ftp_cmd(pool *p, char *buf, size_t buflen, int flags) {
+ arg_len = buflen - strlen(wrd);
+ arg = pcalloc(cmd->pool, arg_len + 1);
+
++ /* Remember that ptr here is advanced past the first word. */
+ for (i = 0, j = 0; i < arg_len; i++) {
+ pr_signals_handle();
+ if (i > 1 &&
+@@ -854,14 +876,13 @@ static cmd_rec *make_ftp_cmd(pool *p, char *buf, size_t buflen, int flags) {
+
+ /* Strip out the NUL by simply not copying it into the new buffer. */
+ have_crnul = TRUE;
++
+ } else {
+ arg[j++] = ptr[i];
+ }
+ }
+
+- cmd->arg = arg;
+-
+- if (have_crnul) {
++ if (have_crnul == TRUE) {
+ char *dup_arg;
+
+ /* Now make a copy of the stripped argument; this is what we need to
+@@ -871,6 +892,11 @@ static cmd_rec *make_ftp_cmd(pool *p, char *buf, size_t buflen, int flags) {
+ ptr = dup_arg;
+ }
+
++ cmd->arg = arg;
++
++ /* Now we can read the remaining words, as command arguments, from the
++ * input buffer.
++ */
+ while ((wrd = pr_str_get_word(&ptr, str_flags)) != NULL) {
+ pr_signals_handle();
+ *((char **) push_array(tarr)) = pstrdup(cmd->pool, wrd);
+diff --git a/src/str.c b/src/str.c
+index bcca4ae4d..a2ff74daf 100644
+--- a/src/str.c
++++ b/src/str.c
+@@ -1,6 +1,6 @@
+ /*
+ * ProFTPD - FTP server daemon
+- * Copyright (c) 2008-2017 The ProFTPD Project team
++ * Copyright (c) 2008-2023 The ProFTPD Project team
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -1209,7 +1209,7 @@ int pr_str_get_nbytes(const char *str, const char *units, off_t *nbytes) {
+
+ char *pr_str_get_word(char **cp, int flags) {
+ char *res, *dst;
+- char quote_mode = 0;
++ int quote_mode = FALSE;
+
+ if (cp == NULL ||
+ !*cp ||
+@@ -1238,24 +1238,28 @@ char *pr_str_get_word(char **cp, int flags) {
+ }
+ }
+
+- if (**cp == '\"') {
+- quote_mode++;
+- (*cp)++;
++ if (!(flags & PR_STR_FL_IGNORE_QUOTES)) {
++ if (**cp == '\"') {
++ quote_mode = TRUE;
++ (*cp)++;
++ }
+ }
+
+ while (**cp && (quote_mode ? (**cp != '\"') : !PR_ISSPACE(**cp))) {
+ pr_signals_handle();
+
+- if (**cp == '\\' && quote_mode) {
+-
++ if (**cp == '\\' &&
++ quote_mode == TRUE) {
+ /* Escaped char */
+ if (*((*cp)+1)) {
+- *dst = *(++(*cp));
++ *dst++ = *(++(*cp));
++ (*cp)++;
++ continue;
+ }
+ }
+
+ *dst++ = **cp;
+- ++(*cp);
++ (*cp)++;
+ }
+
+ if (**cp) {
+diff --git a/tests/api/str.c b/tests/api/str.c
+index 050f5c563..bc64f0fb0 100644
+--- a/tests/api/str.c
++++ b/tests/api/str.c
+@@ -1,6 +1,6 @@
+ /*
+ * ProFTPD - FTP server testsuite
+- * Copyright (c) 2008-2017 The ProFTPD Project team
++ * Copyright (c) 2008-2023 The ProFTPD Project team
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -695,19 +695,23 @@ END_TEST
+ START_TEST (get_word_test) {
+ char *ok, *res, *str;
+
++ mark_point();
+ res = pr_str_get_word(NULL, 0);
+ fail_unless(res == NULL, "Failed to handle null arguments");
+ fail_unless(errno == EINVAL, "Failed to set errno to EINVAL");
+
++ mark_point();
+ str = NULL;
+ res = pr_str_get_word(&str, 0);
+ fail_unless(res == NULL, "Failed to handle null str argument");
+ fail_unless(errno == EINVAL, "Failed to set errno to EINVAL");
+
++ mark_point();
+ str = pstrdup(p, " ");
+ res = pr_str_get_word(&str, 0);
+ fail_unless(res == NULL, "Failed to handle whitespace argument");
+
++ mark_point();
+ str = pstrdup(p, " foo");
+ res = pr_str_get_word(&str, PR_STR_FL_PRESERVE_WHITESPACE);
+ fail_unless(res != NULL, "Failed to handle whitespace argument: %s",
+@@ -723,6 +727,7 @@ START_TEST (get_word_test) {
+ ok = "foo";
+ fail_unless(strcmp(res, ok) == 0, "Expected '%s', got '%s'", ok, res);
+
++ mark_point();
+ str = pstrdup(p, " # foo");
+ res = pr_str_get_word(&str, 0);
+ fail_unless(res == NULL, "Failed to handle commented argument");
+@@ -742,6 +747,8 @@ START_TEST (get_word_test) {
+ fail_unless(strcmp(res, ok) == 0, "Expected '%s', got '%s'", ok, res);
+
+ /* Test multiple embedded quotes. */
++
++ mark_point();
+ str = pstrdup(p, "foo \"bar baz\" qux \"quz norf\"");
+ res = pr_str_get_word(&str, 0);
+ fail_unless(res != NULL, "Failed to handle quoted argument: %s",
+@@ -770,6 +777,47 @@ START_TEST (get_word_test) {
+
+ ok = "quz norf";
+ fail_unless(strcmp(res, ok) == 0, "Expected '%s', got '%s'", ok, res);
++
++
++ /* Test embedded quotes with backslashes (Issue #1683). */
++ mark_point();
++
++ str = pstrdup(p, "\"\\\\SYST\"");
++ res = pr_str_get_word(&str, 0);
++ fail_unless(res != NULL, "Failed to handle quoted argument: %s",
++ strerror(errno));
++
++ ok = "\\SYST";
++ fail_unless(strcmp(res, ok) == 0, "Expected '%s', got '%s'", ok, res);
++
++ mark_point();
++ str = pstrdup(p, "\"\"\\\\SYST");
++ res = pr_str_get_word(&str, 0);
++ fail_unless(res != NULL, "Failed to handle quoted argument: %s",
++ strerror(errno));
++
++ /* Note that pr_str_get_word() is intended to be called multiple times
++ * on an advancing buffer, effectively tokenizing the buffer. This is
++ * why the function does NOT decrement its quote mode.
++ */
++ ok = "";
++ fail_unless(strcmp(res, ok) == 0, "Expected '%s', got '%s'", ok, res);
++
++ /* Now do the same tests with the IGNORE_QUOTES flag */
++ mark_point();
++
++ str = ok = pstrdup(p, "\"\\\\SYST\"");
++ res = pr_str_get_word(&str, PR_STR_FL_IGNORE_QUOTES);
++ fail_unless(res != NULL, "Failed to handle quoted argument: %s",
++ strerror(errno));
++ fail_unless(strcmp(res, ok) == 0, "Expected '%s', got '%s'", ok, res);
++
++ mark_point();
++ str = ok = pstrdup(p, "\"\"\\\\SYST");
++ res = pr_str_get_word(&str, PR_STR_FL_IGNORE_QUOTES);
++ fail_unless(res != NULL, "Failed to handle quoted argument: %s",
++ strerror(errno));
++ fail_unless(strcmp(res, ok) == 0, "Expected '%s', got '%s'", ok, res);
+ }
+ END_TEST
+
+--
+2.25.1
+
diff --git a/meta-networking/recipes-daemons/proftpd/proftpd_1.3.7c.bb b/meta-networking/recipes-daemons/proftpd/proftpd_1.3.7c.bb
index 686f1e5cdf..9d846f46a2 100644
--- a/meta-networking/recipes-daemons/proftpd/proftpd_1.3.7c.bb
+++ b/meta-networking/recipes-daemons/proftpd/proftpd_1.3.7c.bb
@@ -15,6 +15,7 @@ SRC_URI = "git://github.com/proftpd/proftpd.git;branch=${BRANCH};protocol=https
file://contrib.patch \
file://build_fixup.patch \
file://proftpd.service \
+ file://CVE-2023-51713.patch \
"
S = "${WORKDIR}/git"
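Editor's note: the following is a minimal, self-contained C++ sketch (not ProFTPD code; all helper names are invented) illustrating the root cause addressed by CVE-2023-51713.patch above. A quote/backslash-aware first-word extraction can return a word that is shorter than the bytes it consumed from the command buffer, so computing the argument length as buflen - strlen(word) overestimates the remaining input and lets the later CRNUL scan read past the buffer.

// Minimal sketch; assumes nothing about ProFTPD internals, names are hypothetical.
#include <cctype>
#include <cstring>
#include <iostream>
#include <string>

// First word with quote/backslash handling (simplified, shell-like).
static std::string firstWordQuoted(const char *p)
{
    std::string out;
    bool quoted = false;
    if (*p == '"') { quoted = true; ++p; }
    while (*p && (quoted ? *p != '"' : !std::isspace(static_cast<unsigned char>(*p)))) {
        if (quoted && *p == '\\' && p[1]) ++p;   // drop the escaping backslash
        out += *p++;
    }
    return out;
}

// First word treating quotes and backslashes as ordinary bytes (IGNORE_QUOTES-style).
static std::string firstWordRaw(const char *p)
{
    std::string out;
    while (*p && !std::isspace(static_cast<unsigned char>(*p)))
        out += *p++;
    return out;
}

int main()
{
    const char cmd[] = "\"\\\\SYST\"";                // the 8-byte command line: "\\SYST"
    const size_t buflen = std::strlen(cmd);

    const std::string quoted = firstWordQuoted(cmd);  // yields \SYST (5 bytes)
    const std::string raw = firstWordRaw(cmd);        // yields all 8 bytes unchanged

    std::cout << "quoted word: " << quoted.size()
              << " bytes -> assumed argument bytes: " << (buflen - quoted.size()) << "\n";
    std::cout << "raw word:    " << raw.size()
              << " bytes -> assumed argument bytes: " << (buflen - raw.size()) << "\n";
}

Running the sketch prints an assumed argument length of 3 for the quote-aware case versus 0 when quotes are ignored; that 3-byte overestimate is the over-read the upstream fix avoids by passing PR_STR_FL_IGNORE_QUOTES when extracting the FTP command word.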
diff --git a/meta-networking/recipes-daemons/radvd/radvd.inc b/meta-networking/recipes-daemons/radvd/radvd.inc
index 2afaa48411..5da31b3f0e 100644
--- a/meta-networking/recipes-daemons/radvd/radvd.inc
+++ b/meta-networking/recipes-daemons/radvd/radvd.inc
@@ -58,7 +58,8 @@ do_install:append () {
}
USERADD_PACKAGES = "${PN}"
-USERADD_PARAM:${PN} = "--system --home ${localstatedir}/run/radvd/ -M -g nogroup radvd"
+GROUPADD_PARAM:${PN} = "--system nogroup"
+USERADD_PARAM:${PN} = "--system --home ${localstatedir}/run/radvd/ -M -g nogroup --shell /sbin/nologin radvd"
pkg_postinst:${PN} () {
if [ -z "$D" -a -x /etc/init.d/populate-volatile.sh ]; then
diff --git a/meta-networking/recipes-daemons/squid/files/CVE-2023-46728.patch b/meta-networking/recipes-daemons/squid/files/CVE-2023-46728.patch
new file mode 100644
index 0000000000..b11721041e
--- /dev/null
+++ b/meta-networking/recipes-daemons/squid/files/CVE-2023-46728.patch
@@ -0,0 +1,608 @@
+Partial backport of:
+
+From 6ea12e8fb590ac6959e9356a81aa3370576568c3 Mon Sep 17 00:00:00 2001
+From: Alex Rousskov <rousskov@measurement-factory.com>
+Date: Tue, 26 Jul 2022 15:05:54 +0000
+Subject: [PATCH] Remove support for Gopher protocol (#1092)
+
+Gopher code quality remains too low for production use in most
+environments. The code is a persistent source of vulnerabilities and
+fixing it requires significant effort. We should not be spending scarce
+Project resources on improving that code, especially given the lack of
+strong demand for Gopher support.
+
+With this change, Gopher requests will be handled like any other request
+with an unknown (to Squid) protocol. For example, HTTP requests with
+Gopher URI scheme result in ERR_UNSUP_REQ.
+
+Default Squid configuration still considers TCP port 70 "safe". The
+corresponding Safe_ports ACL rule has not been removed for consistency's
+sake: We consider WAIS port safe even though Squid refuses to forward
+WAIS requests:
+
+ acl Safe_ports port 70 # gopher
+ acl Safe_ports port 210 # wais
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/squid/tree/debian/patches/CVE-2023-46728.patch?h=ubuntu/focal-security&id=9ccd217ca9428c9a6597e9310a99552026b245fa
+Upstream commit https://github.com/squid-cache/squid/commit/6ea12e8fb590ac6959e9356a81aa3370576568c3]
+CVE: CVE-2023-46728
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ doc/Programming-Guide/Groups.dox | 5 -
+ doc/debug-sections.txt | 1 -
+ doc/manuals/de.po | 2 +-
+ doc/manuals/en.po | 2 +-
+ doc/manuals/en_AU.po | 2 +-
+ doc/manuals/es.po | 2 +-
+ doc/manuals/fr.po | 2 +-
+ doc/manuals/it.po | 2 +-
+ errors/af.po | 6 +-
+ errors/az.po | 6 +-
+ errors/bg.po | 6 +-
+ errors/ca.po | 6 +-
+ errors/cs.po | 6 +-
+ errors/da.po | 6 +-
+ errors/de.po | 6 +-
+ errors/el.po | 4 +-
+ errors/en.po | 6 +-
+ errors/errorpage.css | 2 +-
+ errors/es-mx.po | 3 +-
+ errors/es.po | 4 +-
+ errors/et.po | 6 +-
+ errors/fi.po | 7 +-
+ errors/fr.po | 6 +-
+ errors/he.po | 6 +-
+ errors/hu.po | 6 +-
+ errors/hy.po | 6 +-
+ errors/it.po | 4 +-
+ errors/ja.po | 6 +-
+ errors/ko.po | 6 +-
+ errors/lt.po | 6 +-
+ errors/lv.po | 6 +-
+ errors/nl.po | 6 +-
+ errors/pl.po | 6 +-
+ errors/pt-br.po | 6 +-
+ errors/pt.po | 6 +-
+ errors/ro.po | 4 +-
+ errors/ru.po | 6 +-
+ errors/sk.po | 6 +-
+ errors/sl.po | 6 +-
+ errors/sr-latn.po | 4 +-
+ errors/sv.po | 6 +-
+ errors/templates/ERR_UNSUP_REQ | 2 +-
+ errors/tr.po | 6 +-
+ errors/uk.po | 6 +-
+ errors/vi.po | 4 +-
+ errors/zh-hans.po | 6 +-
+ errors/zh-hant.po | 7 +-
+ src/FwdState.cc | 5 -
+ src/HttpRequest.cc | 6 -
+ src/IoStats.h | 2 +-
+ src/Makefile.am | 8 -
+ src/adaptation/ecap/Host.cc | 1 -
+ src/adaptation/ecap/MessageRep.cc | 2 -
+ src/anyp/ProtocolType.h | 1 -
+ src/anyp/Uri.cc | 1 -
+ src/anyp/UriScheme.cc | 3 -
+ src/cf.data.pre | 5 +-
+ src/client_side_request.cc | 4 -
+ src/error/forward.h | 2 +-
+ src/gopher.cc | 993 -----------------------
+ src/gopher.h | 29 -
+ src/http/Message.h | 1 -
+ src/mgr/IoAction.cc | 3 -
+ src/mgr/IoAction.h | 2 -
+ src/squid.8.in | 2 +-
+ src/stat.cc | 19 -
+ src/tests/Stub.am | 1 -
+ src/tests/stub_gopher.cc | 17 -
+ test-suite/squidconf/regressions-3.4.0.1 | 1 -
+ 69 files changed, 88 insertions(+), 1251 deletions(-)
+ delete mode 100644 src/gopher.cc
+ delete mode 100644 src/gopher.h
+ delete mode 100644 src/tests/stub_gopher.cc
+
+--- a/src/FwdState.cc
++++ b/src/FwdState.cc
+@@ -28,7 +28,6 @@
+ #include "fde.h"
+ #include "FwdState.h"
+ #include "globals.h"
+-#include "gopher.h"
+ #include "hier_code.h"
+ #include "http.h"
+ #include "http/Stream.h"
+@@ -1004,10 +1003,6 @@ FwdState::dispatch()
+ httpStart(this);
+ break;
+
+- case AnyP::PROTO_GOPHER:
+- gopherStart(this);
+- break;
+-
+ case AnyP::PROTO_FTP:
+ if (request->flags.ftpNative)
+ Ftp::StartRelay(this);
+--- a/src/HttpRequest.cc
++++ b/src/HttpRequest.cc
+@@ -18,7 +18,6 @@
+ #include "Downloader.h"
+ #include "err_detail_type.h"
+ #include "globals.h"
+-#include "gopher.h"
+ #include "http.h"
+ #include "http/one/RequestParser.h"
+ #include "http/Stream.h"
+@@ -556,11 +555,6 @@ HttpRequest::maybeCacheable()
+ return false;
+ break;
+
+- case AnyP::PROTO_GOPHER:
+- if (!gopherCachable(this))
+- return false;
+- break;
+-
+ case AnyP::PROTO_CACHE_OBJECT:
+ return false;
+
+--- a/src/IoStats.h
++++ b/src/IoStats.h
+@@ -22,7 +22,7 @@ public:
+ int writes;
+ int write_hist[histSize];
+ }
+- Http, Ftp, Gopher;
++ Http, Ftp;
+ };
+
+ #endif /* SQUID_IOSTATS_H_ */
+--- a/src/Makefile.am
++++ b/src/Makefile.am
+@@ -306,8 +306,6 @@ squid_SOURCES = \
+ FwdState.h \
+ Generic.h \
+ globals.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ helper.h \
+ hier_code.h \
+@@ -1259,8 +1257,6 @@ tests_testCacheManager_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ hier_code.h \
+ helper.cc \
+ $(HTCPSOURCE) \
+@@ -1678,8 +1674,6 @@ tests_testEvent_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -1914,8 +1908,6 @@ tests_testEventLoop_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -2145,8 +2137,6 @@ tests_test_http_range_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -2461,8 +2451,6 @@ tests_testHttpRequest_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -3307,8 +3295,6 @@ tests_testURL_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+--- a/src/adaptation/ecap/Host.cc
++++ b/src/adaptation/ecap/Host.cc
+@@ -49,7 +49,6 @@ Adaptation::Ecap::Host::Host()
+ libecap::protocolHttp.assignHostId(AnyP::PROTO_HTTP);
+ libecap::protocolHttps.assignHostId(AnyP::PROTO_HTTPS);
+ libecap::protocolFtp.assignHostId(AnyP::PROTO_FTP);
+- libecap::protocolGopher.assignHostId(AnyP::PROTO_GOPHER);
+ libecap::protocolWais.assignHostId(AnyP::PROTO_WAIS);
+ libecap::protocolUrn.assignHostId(AnyP::PROTO_URN);
+ libecap::protocolWhois.assignHostId(AnyP::PROTO_WHOIS);
+--- a/src/adaptation/ecap/MessageRep.cc
++++ b/src/adaptation/ecap/MessageRep.cc
+@@ -140,8 +140,6 @@ Adaptation::Ecap::FirstLineRep::protocol
+ return libecap::protocolHttps;
+ case AnyP::PROTO_FTP:
+ return libecap::protocolFtp;
+- case AnyP::PROTO_GOPHER:
+- return libecap::protocolGopher;
+ case AnyP::PROTO_WAIS:
+ return libecap::protocolWais;
+ case AnyP::PROTO_WHOIS:
+--- a/src/anyp/ProtocolType.h
++++ b/src/anyp/ProtocolType.h
+@@ -27,7 +27,6 @@ typedef enum {
+ PROTO_HTTPS,
+ PROTO_COAP,
+ PROTO_COAPS,
+- PROTO_GOPHER,
+ PROTO_WAIS,
+ PROTO_CACHE_OBJECT,
+ PROTO_ICP,
+--- a/src/anyp/Uri.cc
++++ b/src/anyp/Uri.cc
+@@ -852,8 +852,6 @@ urlCheckRequest(const HttpRequest * r)
+ if (r->method == Http::METHOD_PUT)
+ rc = 1;
+
+- case AnyP::PROTO_GOPHER:
+-
+ case AnyP::PROTO_WAIS:
+
+ case AnyP::PROTO_WHOIS:
+--- a/src/anyp/UriScheme.cc
++++ b/src/anyp/UriScheme.cc
+@@ -87,9 +87,6 @@ AnyP::UriScheme::defaultPort() const
+ // Assuming IANA policy of allocating same port for base and TLS protocol versions will occur.
+ return 5683;
+
+- case AnyP::PROTO_GOPHER:
+- return 70;
+-
+ case AnyP::PROTO_WAIS:
+ return 210;
+
+--- a/src/client_side_request.cc
++++ b/src/client_side_request.cc
+@@ -33,7 +33,6 @@
+ #include "fd.h"
+ #include "fde.h"
+ #include "format/Token.h"
+-#include "gopher.h"
+ #include "helper.h"
+ #include "helper/Reply.h"
+ #include "http.h"
+@@ -965,9 +964,6 @@ clientHierarchical(ClientHttpRequest * h
+ if (request->url.getScheme() == AnyP::PROTO_HTTP)
+ return method.respMaybeCacheable();
+
+- if (request->url.getScheme() == AnyP::PROTO_GOPHER)
+- return gopherCachable(request);
+-
+ if (request->url.getScheme() == AnyP::PROTO_CACHE_OBJECT)
+ return 0;
+
+--- a/src/err_type.h
++++ b/src/err_type.h
+@@ -65,7 +65,7 @@ typedef enum {
+ ERR_GATEWAY_FAILURE,
+
+ /* Special Cases */
+- ERR_DIR_LISTING, /* Display of remote directory (FTP, Gopher) */
++ ERR_DIR_LISTING, /* Display of remote directory (FTP) */
+ ERR_SQUID_SIGNATURE, /* not really an error */
+ ERR_SHUTTING_DOWN,
+ ERR_PROTOCOL_UNKNOWN,
+--- a/src/HttpMsg.h
++++ b/src/HttpMsg.h
+@@ -38,7 +38,6 @@ public:
+ srcFtp = 1 << (16 + 1), ///< ftp_port or FTP server
+ srcIcap = 1 << (16 + 2), ///< traditional ICAP service without encryption
+ srcEcap = 1 << (16 + 3), ///< eCAP service that uses insecure libraries/daemons
+- srcGopher = 1 << (16 + 14), ///< Gopher server
+ srcWhois = 1 << (16 + 15), ///< Whois server
+ srcUnsafe = 0xFFFF0000, ///< Unsafe sources mask
+ srcSafe = 0x0000FFFF ///< Safe sources mask
+--- a/src/mgr/IoAction.cc
++++ b/src/mgr/IoAction.cc
+@@ -35,9 +35,6 @@ Mgr::IoActionData::operator += (const Io
+ ftp_reads += stats.ftp_reads;
+ for (int i = 0; i < IoStats::histSize; ++i)
+ ftp_read_hist[i] += stats.ftp_read_hist[i];
+- gopher_reads += stats.gopher_reads;
+- for (int i = 0; i < IoStats::histSize; ++i)
+- gopher_read_hist[i] += stats.gopher_read_hist[i];
+
+ return *this;
+ }
+--- a/src/mgr/IoAction.h
++++ b/src/mgr/IoAction.h
+@@ -27,10 +27,8 @@ public:
+ public:
+ double http_reads;
+ double ftp_reads;
+- double gopher_reads;
+ double http_read_hist[IoStats::histSize];
+ double ftp_read_hist[IoStats::histSize];
+- double gopher_read_hist[IoStats::histSize];
+ };
+
+ /// implement aggregated 'io' action
+--- a/src/stat.cc
++++ b/src/stat.cc
+@@ -206,12 +206,6 @@ GetIoStats(Mgr::IoActionData& stats)
+ for (i = 0; i < IoStats::histSize; ++i) {
+ stats.ftp_read_hist[i] = IOStats.Ftp.read_hist[i];
+ }
+-
+- stats.gopher_reads = IOStats.Gopher.reads;
+-
+- for (i = 0; i < IoStats::histSize; ++i) {
+- stats.gopher_read_hist[i] = IOStats.Gopher.read_hist[i];
+- }
+ }
+
+ void
+@@ -245,19 +239,6 @@ DumpIoStats(Mgr::IoActionData& stats, St
+ }
+
+ storeAppendPrintf(sentry, "\n");
+- storeAppendPrintf(sentry, "Gopher I/O\n");
+- storeAppendPrintf(sentry, "number of reads: %.0f\n", stats.gopher_reads);
+- storeAppendPrintf(sentry, "Read Histogram:\n");
+-
+- for (i = 0; i < IoStats::histSize; ++i) {
+- storeAppendPrintf(sentry, "%5d-%5d: %9.0f %2.0f%%\n",
+- i ? (1 << (i - 1)) + 1 : 1,
+- 1 << i,
+- stats.gopher_read_hist[i],
+- Math::doublePercent(stats.gopher_read_hist[i], stats.gopher_reads));
+- }
+-
+- storeAppendPrintf(sentry, "\n");
+ }
+
+ static const char *
+--- a/src/Makefile.in
++++ b/src/Makefile.in
+@@ -263,7 +263,7 @@ am__squid_SOURCES_DIST = AclRegs.cc Auth
+ ExternalACL.h ExternalACLEntry.cc ExternalACLEntry.h \
+ FadingCounter.h FadingCounter.cc fatal.h fatal.cc fd.h fd.cc \
+ fde.cc fde.h FileMap.h filemap.cc fqdncache.h fqdncache.cc \
+- FwdState.cc FwdState.h Generic.h globals.h gopher.h gopher.cc \
++ FwdState.cc FwdState.h Generic.h globals.h \
+ helper.cc helper.h hier_code.h HierarchyLogEntry.h htcp.cc \
+ htcp.h http.cc http.h HttpHeaderFieldStat.h HttpHdrCc.h \
+ HttpHdrCc.cc HttpHdrCc.cci HttpHdrRange.cc HttpHdrSc.cc \
+@@ -352,7 +352,7 @@ am_squid_OBJECTS = $(am__objects_1) Acce
+ EventLoop.$(OBJEXT) external_acl.$(OBJEXT) \
+ ExternalACLEntry.$(OBJEXT) FadingCounter.$(OBJEXT) \
+ fatal.$(OBJEXT) fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \
+- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ HttpHdrCc.$(OBJEXT) HttpHdrRange.$(OBJEXT) HttpHdrSc.$(OBJEXT) \
+ HttpHdrScTarget.$(OBJEXT) HttpHdrContRange.$(OBJEXT) \
+@@ -539,7 +539,7 @@ am__tests_testCacheManager_SOURCES_DIST
+ tests/stub_ETag.cc event.cc external_acl.cc \
+ ExternalACLEntry.cc fatal.h tests/stub_fatal.cc fd.h fd.cc \
+ fde.cc FileMap.h filemap.cc fqdncache.h fqdncache.cc \
+- FwdState.cc FwdState.h gopher.h gopher.cc hier_code.h \
++ FwdState.cc FwdState.h hier_code.h \
+ helper.cc htcp.cc htcp.h http.cc HttpBody.h HttpBody.cc \
+ HttpHeader.h HttpHeader.cc HttpHeaderFieldInfo.h \
+ HttpHeaderTools.h HttpHeaderTools.cc HttpHeaderFieldStat.h \
+@@ -594,7 +594,7 @@ am_tests_testCacheManager_OBJECTS = Acce
+ event.$(OBJEXT) external_acl.$(OBJEXT) \
+ ExternalACLEntry.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \
+ fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \
+- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ HttpBody.$(OBJEXT) HttpHeader.$(OBJEXT) \
+ HttpHeaderTools.$(OBJEXT) HttpHdrCc.$(OBJEXT) \
+@@ -838,7 +838,7 @@ am__tests_testEvent_SOURCES_DIST = Acces
+ EventLoop.h EventLoop.cc external_acl.cc ExternalACLEntry.cc \
+ FadingCounter.cc fatal.h tests/stub_fatal.cc fd.h fd.cc fde.cc \
+ FileMap.h filemap.cc fqdncache.h fqdncache.cc FwdState.cc \
+- FwdState.h gopher.h gopher.cc helper.cc hier_code.h htcp.cc \
++ FwdState.h helper.cc hier_code.h htcp.cc \
+ htcp.h http.cc HttpBody.h HttpBody.cc \
+ tests/stub_HttpControlMsg.cc HttpHeader.h HttpHeader.cc \
+ HttpHeaderFieldInfo.h HttpHeaderTools.h HttpHeaderTools.cc \
+@@ -891,7 +891,7 @@ am_tests_testEvent_OBJECTS = AccessLogEn
+ external_acl.$(OBJEXT) ExternalACLEntry.$(OBJEXT) \
+ FadingCounter.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \
+ fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \
+- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \
+ HttpHeader.$(OBJEXT) HttpHeaderTools.$(OBJEXT) \
+@@ -975,8 +975,8 @@ am__tests_testEventLoop_SOURCES_DIST = A
+ tests/stub_ETag.cc EventLoop.h EventLoop.cc event.cc \
+ external_acl.cc ExternalACLEntry.cc FadingCounter.cc fatal.h \
+ tests/stub_fatal.cc fd.h fd.cc fde.cc FileMap.h filemap.cc \
+- fqdncache.h fqdncache.cc FwdState.cc FwdState.h gopher.h \
+- gopher.cc helper.cc hier_code.h htcp.cc htcp.h http.cc \
++ fqdncache.h fqdncache.cc FwdState.cc FwdState.h \
++ helper.cc hier_code.h htcp.cc htcp.h http.cc \
+ HttpBody.h HttpBody.cc tests/stub_HttpControlMsg.cc \
+ HttpHeader.h HttpHeader.cc HttpHeaderFieldInfo.h \
+ HttpHeaderTools.h HttpHeaderTools.cc HttpHeaderFieldStat.h \
+@@ -1029,7 +1029,7 @@ am_tests_testEventLoop_OBJECTS = AccessL
+ external_acl.$(OBJEXT) ExternalACLEntry.$(OBJEXT) \
+ FadingCounter.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \
+ fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \
+- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \
+ HttpHeader.$(OBJEXT) HttpHeaderTools.$(OBJEXT) \
+@@ -1187,7 +1187,7 @@ am__tests_testHttpRequest_SOURCES_DIST =
+ fs_io.cc dlink.h dlink.cc dns_internal.cc errorpage.cc \
+ tests/stub_ETag.cc external_acl.cc ExternalACLEntry.cc fatal.h \
+ tests/stub_fatal.cc fd.h fd.cc fde.cc fqdncache.h fqdncache.cc \
+- FwdState.cc FwdState.h gopher.h gopher.cc helper.cc \
++ FwdState.cc FwdState.h helper.cc \
+ hier_code.h htcp.cc htcp.h http.cc HttpBody.h HttpBody.cc \
+ tests/stub_HttpControlMsg.cc HttpHeader.h HttpHeader.cc \
+ HttpHeaderFieldInfo.h HttpHeaderTools.h HttpHeaderTools.cc \
+@@ -1243,7 +1243,7 @@ am_tests_testHttpRequest_OBJECTS = Acces
+ $(am__objects_4) errorpage.$(OBJEXT) tests/stub_ETag.$(OBJEXT) \
+ external_acl.$(OBJEXT) ExternalACLEntry.$(OBJEXT) \
+ tests/stub_fatal.$(OBJEXT) fd.$(OBJEXT) fde.$(OBJEXT) \
+- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \
+ HttpHeader.$(OBJEXT) HttpHeaderTools.$(OBJEXT) \
+@@ -1670,8 +1670,8 @@ am__tests_testURL_SOURCES_DIST = AccessL
+ fs_io.cc dlink.h dlink.cc dns_internal.cc errorpage.cc ETag.cc \
+ event.cc external_acl.cc ExternalACLEntry.cc fatal.h \
+ tests/stub_fatal.cc fd.h fd.cc fde.cc FileMap.h filemap.cc \
+- fqdncache.h fqdncache.cc FwdState.cc FwdState.h gopher.h \
+- gopher.cc helper.cc hier_code.h htcp.cc htcp.h http.cc \
++ fqdncache.h fqdncache.cc FwdState.cc FwdState.h \
++ helper.cc hier_code.h htcp.cc htcp.h http.cc \
+ HttpBody.h HttpBody.cc tests/stub_HttpControlMsg.cc \
+ HttpHeaderFieldStat.h HttpHdrCc.h HttpHdrCc.cc HttpHdrCc.cci \
+ HttpHdrContRange.cc HttpHdrRange.cc HttpHdrSc.cc \
+@@ -1725,7 +1725,7 @@ am_tests_testURL_OBJECTS = AccessLogEntr
+ event.$(OBJEXT) external_acl.$(OBJEXT) \
+ ExternalACLEntry.$(OBJEXT) tests/stub_fatal.$(OBJEXT) \
+ fd.$(OBJEXT) fde.$(OBJEXT) filemap.$(OBJEXT) \
+- fqdncache.$(OBJEXT) FwdState.$(OBJEXT) gopher.$(OBJEXT) \
++ fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+ helper.$(OBJEXT) $(am__objects_5) http.$(OBJEXT) \
+ HttpBody.$(OBJEXT) tests/stub_HttpControlMsg.$(OBJEXT) \
+ HttpHdrCc.$(OBJEXT) HttpHdrContRange.$(OBJEXT) \
+@@ -1925,8 +1925,8 @@ am__tests_test_http_range_SOURCES_DIST =
+ dns_internal.cc errorpage.cc tests/stub_ETag.cc event.cc \
+ FadingCounter.cc fatal.h tests/stub_libauth.cc \
+ tests/stub_fatal.cc fd.h fd.cc fde.cc FileMap.h filemap.cc \
+- fqdncache.h fqdncache.cc FwdState.cc FwdState.h gopher.h \
+- gopher.cc helper.cc hier_code.h htcp.cc htcp.h http.cc \
++ fqdncache.h fqdncache.cc FwdState.cc FwdState.h \
++ helper.cc hier_code.h htcp.cc htcp.h http.cc \
+ HttpBody.h HttpBody.cc tests/stub_HttpControlMsg.cc \
+ HttpHeaderFieldStat.h HttpHdrCc.h HttpHdrCc.cc HttpHdrCc.cci \
+ HttpHdrContRange.cc HttpHdrRange.cc HttpHdrSc.cc \
+@@ -1979,7 +1979,7 @@ am_tests_test_http_range_OBJECTS = Acces
+ FadingCounter.$(OBJEXT) tests/stub_libauth.$(OBJEXT) \
+ tests/stub_fatal.$(OBJEXT) fd.$(OBJEXT) fde.$(OBJEXT) \
+ filemap.$(OBJEXT) fqdncache.$(OBJEXT) FwdState.$(OBJEXT) \
+- gopher.$(OBJEXT) helper.$(OBJEXT) $(am__objects_5) \
++ helper.$(OBJEXT) $(am__objects_5) \
+ http.$(OBJEXT) HttpBody.$(OBJEXT) \
+ tests/stub_HttpControlMsg.$(OBJEXT) HttpHdrCc.$(OBJEXT) \
+ HttpHdrContRange.$(OBJEXT) HttpHdrRange.$(OBJEXT) \
+@@ -2131,7 +2131,7 @@ am__depfiles_remade = ./$(DEPDIR)/Access
+ ./$(DEPDIR)/external_acl.Po ./$(DEPDIR)/fatal.Po \
+ ./$(DEPDIR)/fd.Po ./$(DEPDIR)/fde.Po ./$(DEPDIR)/filemap.Po \
+ ./$(DEPDIR)/fqdncache.Po ./$(DEPDIR)/fs_io.Po \
+- ./$(DEPDIR)/globals.Po ./$(DEPDIR)/gopher.Po \
++ ./$(DEPDIR)/globals.Po \
+ ./$(DEPDIR)/helper.Po ./$(DEPDIR)/hier_code.Po \
+ ./$(DEPDIR)/htcp.Po ./$(DEPDIR)/http.Po \
+ ./$(DEPDIR)/icp_opcode.Po ./$(DEPDIR)/icp_v2.Po \
+@@ -3043,7 +3043,7 @@ squid_SOURCES = $(ACL_REGISTRATION_SOURC
+ ExternalACL.h ExternalACLEntry.cc ExternalACLEntry.h \
+ FadingCounter.h FadingCounter.cc fatal.h fatal.cc fd.h fd.cc \
+ fde.cc fde.h FileMap.h filemap.cc fqdncache.h fqdncache.cc \
+- FwdState.cc FwdState.h Generic.h globals.h gopher.h gopher.cc \
++ FwdState.cc FwdState.h Generic.h globals.h \
+ helper.cc helper.h hier_code.h HierarchyLogEntry.h \
+ $(HTCPSOURCE) http.cc http.h HttpHeaderFieldStat.h HttpHdrCc.h \
+ HttpHdrCc.cc HttpHdrCc.cci HttpHdrRange.cc HttpHdrSc.cc \
+@@ -3708,8 +3708,6 @@ tests_testCacheManager_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ hier_code.h \
+ helper.cc \
+ $(HTCPSOURCE) \
+@@ -4134,8 +4132,6 @@ tests_testEvent_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -4371,8 +4367,6 @@ tests_testEventLoop_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -4604,8 +4598,6 @@ tests_test_http_range_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -4924,8 +4916,6 @@ tests_testHttpRequest_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -5777,8 +5767,6 @@ tests_testURL_SOURCES = \
+ fqdncache.cc \
+ FwdState.cc \
+ FwdState.h \
+- gopher.h \
+- gopher.cc \
+ helper.cc \
+ hier_code.h \
+ $(HTCPSOURCE) \
+@@ -6823,7 +6811,6 @@ distclean-compile:
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fqdncache.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fs_io.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/globals.Po@am__quote@ # am--include-marker
+-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gopher.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/helper.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hier_code.Po@am__quote@ # am--include-marker
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/htcp.Po@am__quote@ # am--include-marker
+@@ -7804,7 +7791,6 @@ distclean: distclean-recursive
+ -rm -f ./$(DEPDIR)/fqdncache.Po
+ -rm -f ./$(DEPDIR)/fs_io.Po
+ -rm -f ./$(DEPDIR)/globals.Po
+- -rm -f ./$(DEPDIR)/gopher.Po
+ -rm -f ./$(DEPDIR)/helper.Po
+ -rm -f ./$(DEPDIR)/hier_code.Po
+ -rm -f ./$(DEPDIR)/htcp.Po
+@@ -8129,7 +8115,6 @@ maintainer-clean: maintainer-clean-recur
+ -rm -f ./$(DEPDIR)/fqdncache.Po
+ -rm -f ./$(DEPDIR)/fs_io.Po
+ -rm -f ./$(DEPDIR)/globals.Po
+- -rm -f ./$(DEPDIR)/gopher.Po
+ -rm -f ./$(DEPDIR)/helper.Po
+ -rm -f ./$(DEPDIR)/hier_code.Po
+ -rm -f ./$(DEPDIR)/htcp.Po
diff --git a/meta-networking/recipes-daemons/squid/files/CVE-2023-46846-pre1.patch b/meta-networking/recipes-daemons/squid/files/CVE-2023-46846-pre1.patch
new file mode 100644
index 0000000000..5b4e370d49
--- /dev/null
+++ b/meta-networking/recipes-daemons/squid/files/CVE-2023-46846-pre1.patch
@@ -0,0 +1,1154 @@
+Backport of:
+
+From 417da4006cf5c97d44e74431b816fc58fec9e270 Mon Sep 17 00:00:00 2001
+From: Eduard Bagdasaryan <eduard.bagdasaryan@measurement-factory.com>
+Date: Mon, 18 Mar 2019 17:48:21 +0000
+Subject: [PATCH] Fix incremental parsing of chunked quoted extensions (#310)
+
+Before this change, incremental parsing of quoted chunked extensions
+was broken for two reasons:
+
+* Http::One::Parser::skipLineTerminator() unexpectedly threw after
+ partially received quoted chunk extension value.
+
+* When Http::One::Tokenizer was unable to parse a quoted extension,
+ it incorrectly restored the input buffer to the beginning of the
+ extension value (instead of the extension itself), thus making
+ further incremental parsing iterations impossible.
+
+IMO, the reason for this problem was that Http::One::Tokenizer::qdText()
+could not distinguish two cases (returning false in both):
+
+* the end of the quoted string not yet reached
+
+* an input error, e.g., wrong/unexpected character
+
+A possible approach could be to improve Http::One::Tokenizer, making it
+aware about "needs more data" state. However, to be acceptable,
+these improvements should be done in the base Parser::Tokenizer
+class instead. These changes seem to be non-trivial and could be
+done separately and later.
+
+Another approach, used here, is to simplify the complex and error-prone
+chunked extensions parsing algorithm, fixing incremental parsing bugs
+and still parse incrementally in almost all cases. The performance
+regression could be expected only in relatively rare cases of partially
+received or malformed extensions.
+
+Also:
+* fixed parsing of partial use-original-body extension values
+* do not treat an invalid use-original-body as an unknown extension
+* optimization: parse use-original-body extension only in ICAP context
+ (i.e., where it is expected)
+* improvement: added a new API to TeChunkedParser to specify known
+ chunked extensions list
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/squid/tree/debian/patches/CVE-2023-46846-pre1.patch?h=ubuntu/focal-security&id=9ccd217ca9428c9a6597e9310a99552026b245fa
+Upstream commit https://github.com/squid-cache/squid/commit/417da4006cf5c97d44e74431b816fc58fec9e270]
+CVE: CVE-2023-46846 #Dependency Patch1
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/adaptation/icap/ModXact.cc | 21 ++++-
+ src/adaptation/icap/ModXact.h | 20 +++++
+ src/http/one/Parser.cc | 35 ++++----
+ src/http/one/Parser.h | 10 ++-
+ src/http/one/RequestParser.cc | 16 ++--
+ src/http/one/RequestParser.h | 8 +-
+ src/http/one/ResponseParser.cc | 17 ++--
+ src/http/one/ResponseParser.h | 2 +-
+ src/http/one/TeChunkedParser.cc | 139 ++++++++++++++++++--------------
+ src/http/one/TeChunkedParser.h | 41 ++++++++--
+ src/http/one/Tokenizer.cc | 104 ++++++++++++------------
+ src/http/one/Tokenizer.h | 89 ++++++++------------
+ src/http/one/forward.h | 3 +
+ src/parser/BinaryTokenizer.h | 3 +-
+ src/parser/Makefile.am | 1 +
+ src/parser/Tokenizer.cc | 40 +++++++++
+ src/parser/Tokenizer.h | 13 +++
+ src/parser/forward.h | 22 +++++
+ 18 files changed, 364 insertions(+), 220 deletions(-)
+ create mode 100644 src/parser/forward.h
+
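Editor's note: as a reading aid for the chunk-ext grammar quoted in this patch's description, here is a small, self-contained C++ sketch of parsing one "chunk-size [ chunk-ext ] CRLF" line. It is not Squid's Http::One::Tokenizer API; every name below is invented, quoted-string extension values are not handled, and it parses a complete line rather than incrementally.

#include <cctype>
#include <cstdint>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

struct ChunkHeader {
    uint64_t size = 0;
    std::map<std::string, std::string> extensions; // chunk-ext-name -> chunk-ext-val
};

// Parses one 'chunk-size *( ";" ext-name [ "=" ext-val ] ) CRLF' line.
// BWS (optional spaces/tabs) around ';' and '=' is tolerated, mirroring the
// Bug 4492 workarounds mentioned above; quoted-string values are not handled.
static ChunkHeader parseChunkHeader(const std::string &line)
{
    ChunkHeader h;
    size_t pos = 0;
    const size_t sizeStart = pos;
    while (pos < line.size() && std::isxdigit(static_cast<unsigned char>(line[pos])))
        ++pos;
    if (pos == sizeStart)
        throw std::runtime_error("missing chunk-size");
    h.size = std::stoull(line.substr(sizeStart, pos - sizeStart), nullptr, 16);

    const auto skipBws = [&] {
        while (pos < line.size() && (line[pos] == ' ' || line[pos] == '\t'))
            ++pos;
    };

    for (;;) {
        skipBws();
        if (line.compare(pos, 2, "\r\n") == 0)
            return h;                              // reached the CRLF terminator
        if (pos >= line.size() || line[pos] != ';')
            throw std::runtime_error("garbage instead of CRLF or chunk-ext");
        ++pos;                                     // consume ';'
        skipBws();
        const size_t nameStart = pos;
        while (pos < line.size() &&
               (std::isalnum(static_cast<unsigned char>(line[pos])) || line[pos] == '-'))
            ++pos;
        const std::string name = line.substr(nameStart, pos - nameStart);
        std::string value;
        skipBws();
        if (pos < line.size() && line[pos] == '=') {
            ++pos;
            skipBws();
            const size_t valStart = pos;
            while (pos < line.size() && line[pos] != ';' && line[pos] != '\r' &&
                   line[pos] != ' ' && line[pos] != '\t')
                ++pos;
            value = line.substr(valStart, pos - valStart);
        }
        h.extensions[name] = value;
    }
}

int main()
{
    const auto h = parseChunkHeader("0; use-original-body=10\r\n");
    std::cout << "chunk-size=" << h.size
              << " use-original-body=" << h.extensions.at("use-original-body") << "\n";
}

On the ICAP-style last-chunk line "0; use-original-body=10\r\n" this prints chunk-size=0 and use-original-body=10, the case the upstream change routes to the new ChunkExtensionValueParser hook instead of the old hard-coded useOriginBody field.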
+--- a/src/adaptation/icap/ModXact.cc
++++ b/src/adaptation/icap/ModXact.cc
+@@ -25,12 +25,13 @@
+ #include "comm.h"
+ #include "comm/Connection.h"
+ #include "err_detail_type.h"
+-#include "http/one/TeChunkedParser.h"
+ #include "HttpHeaderTools.h"
+ #include "HttpMsg.h"
+ #include "HttpReply.h"
+ #include "HttpRequest.h"
+ #include "MasterXaction.h"
++#include "parser/Tokenizer.h"
++#include "sbuf/Stream.h"
+ #include "SquidTime.h"
+
+ // flow and terminology:
+@@ -44,6 +45,8 @@ CBDATA_NAMESPACED_CLASS_INIT(Adaptation:
+
+ static const size_t TheBackupLimit = BodyPipe::MaxCapacity;
+
++const SBuf Adaptation::Icap::ChunkExtensionValueParser::UseOriginalBodyName("use-original-body");
++
+ Adaptation::Icap::ModXact::State::State()
+ {
+ memset(this, 0, sizeof(*this));
+@@ -1108,6 +1111,7 @@ void Adaptation::Icap::ModXact::decideOn
+ state.parsing = State::psBody;
+ replyHttpBodySize = 0;
+ bodyParser = new Http1::TeChunkedParser;
++ bodyParser->parseExtensionValuesWith(&extensionParser);
+ makeAdaptedBodyPipe("adapted response from the ICAP server");
+ Must(state.sending == State::sendingAdapted);
+ } else {
+@@ -1142,9 +1146,8 @@ void Adaptation::Icap::ModXact::parseBod
+ }
+
+ if (parsed) {
+- if (state.readyForUob && bodyParser->useOriginBody >= 0) {
+- prepPartialBodyEchoing(
+- static_cast<uint64_t>(bodyParser->useOriginBody));
++ if (state.readyForUob && extensionParser.sawUseOriginalBody()) {
++ prepPartialBodyEchoing(extensionParser.useOriginalBody());
+ stopParsing();
+ return;
+ }
+@@ -2014,3 +2017,14 @@ void Adaptation::Icap::ModXactLauncher::
+ }
+ }
+
++void
++Adaptation::Icap::ChunkExtensionValueParser::parse(Tokenizer &tok, const SBuf &extName)
++{
++ if (extName == UseOriginalBodyName) {
++ useOriginalBody_ = tok.udec64("use-original-body");
++ assert(useOriginalBody_ >= 0);
++ } else {
++ Ignore(tok, extName);
++ }
++}
++
+--- a/src/adaptation/icap/ModXact.h
++++ b/src/adaptation/icap/ModXact.h
+@@ -15,6 +15,7 @@
+ #include "adaptation/icap/Xaction.h"
+ #include "BodyPipe.h"
+ #include "http/one/forward.h"
++#include "http/one/TeChunkedParser.h"
+
+ /*
+ * ICAPModXact implements ICAP REQMOD and RESPMOD transaction using
+@@ -105,6 +106,23 @@ private:
+ enum State { stDisabled, stWriting, stIeof, stDone } theState;
+ };
+
++/// handles ICAP-specific chunk extensions supported by Squid
++class ChunkExtensionValueParser: public Http1::ChunkExtensionValueParser
++{
++public:
++ /* Http1::ChunkExtensionValueParser API */
++ virtual void parse(Tokenizer &tok, const SBuf &extName) override;
++
++ bool sawUseOriginalBody() const { return useOriginalBody_ >= 0; }
++ uint64_t useOriginalBody() const { assert(sawUseOriginalBody()); return static_cast<uint64_t>(useOriginalBody_); }
++
++private:
++ static const SBuf UseOriginalBodyName;
++
++ /// the value of the parsed use-original-body chunk extension (or -1)
++ int64_t useOriginalBody_ = -1;
++};
++
+ class ModXact: public Xaction, public BodyProducer, public BodyConsumer
+ {
+ CBDATA_CLASS(ModXact);
+@@ -270,6 +288,8 @@ private:
+
+ int adaptHistoryId; ///< adaptation history slot reservation
+
++ ChunkExtensionValueParser extensionParser;
++
+ class State
+ {
+
+--- a/src/http/one/Parser.cc
++++ b/src/http/one/Parser.cc
+@@ -7,10 +7,11 @@
+ */
+
+ #include "squid.h"
++#include "base/CharacterSet.h"
+ #include "Debug.h"
+ #include "http/one/Parser.h"
+-#include "http/one/Tokenizer.h"
+ #include "mime_header.h"
++#include "parser/Tokenizer.h"
+ #include "SquidConfig.h"
+
+ /// RFC 7230 section 2.6 - 7 magic octets
+@@ -61,20 +62,19 @@ Http::One::Parser::DelimiterCharacters()
+ RelaxedDelimiterCharacters() : CharacterSet::SP;
+ }
+
+-bool
+-Http::One::Parser::skipLineTerminator(Http1::Tokenizer &tok) const
++void
++Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
+ {
+ if (tok.skip(Http1::CrLf()))
+- return true;
++ return;
+
+ if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
+- return true;
++ return;
+
+ if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
+- return false; // need more data
++ throw InsufficientInput();
+
+ throw TexcHere("garbage instead of CRLF line terminator");
+- return false; // unreachable, but make naive compilers happy
+ }
+
+ /// all characters except the LF line terminator
+@@ -102,7 +102,7 @@ LineCharacters()
+ void
+ Http::One::Parser::cleanMimePrefix()
+ {
+- Http1::Tokenizer tok(mimeHeaderBlock_);
++ Tokenizer tok(mimeHeaderBlock_);
+ while (tok.skipOne(RelaxedDelimiterCharacters())) {
+ (void)tok.skipAll(LineCharacters()); // optional line content
+ // LF terminator is required.
+@@ -137,7 +137,7 @@ Http::One::Parser::cleanMimePrefix()
+ void
+ Http::One::Parser::unfoldMime()
+ {
+- Http1::Tokenizer tok(mimeHeaderBlock_);
++ Tokenizer tok(mimeHeaderBlock_);
+ const auto szLimit = mimeHeaderBlock_.length();
+ mimeHeaderBlock_.clear();
+ // prevent the mime sender being able to make append() realloc/grow multiple times.
+@@ -228,7 +228,7 @@ Http::One::Parser::getHostHeaderField()
+ debugs(25, 5, "looking for " << name);
+
+ // while we can find more LF in the SBuf
+- Http1::Tokenizer tok(mimeHeaderBlock_);
++ Tokenizer tok(mimeHeaderBlock_);
+ SBuf p;
+
+ while (tok.prefix(p, LineCharacters())) {
+@@ -250,7 +250,7 @@ Http::One::Parser::getHostHeaderField()
+ p.consume(namelen + 1);
+
+ // TODO: optimize SBuf::trim to take CharacterSet directly
+- Http1::Tokenizer t(p);
++ Tokenizer t(p);
+ t.skipAll(CharacterSet::WSP);
+ p = t.remaining();
+
+@@ -278,10 +278,15 @@ Http::One::ErrorLevel()
+ }
+
+ // BWS = *( SP / HTAB ) ; WhitespaceCharacters() may relax this RFC 7230 rule
+-bool
+-Http::One::ParseBws(Tokenizer &tok)
++void
++Http::One::ParseBws(Parser::Tokenizer &tok)
+ {
+- if (const auto count = tok.skipAll(Parser::WhitespaceCharacters())) {
++ const auto count = tok.skipAll(Parser::WhitespaceCharacters());
++
++ if (tok.atEnd())
++ throw InsufficientInput(); // even if count is positive
++
++ if (count) {
+ // Generating BWS is a MUST-level violation so warn about it as needed.
+ debugs(33, ErrorLevel(), "found " << count << " BWS octets");
+ // RFC 7230 says we MUST parse BWS, so we fall through even if
+@@ -289,6 +294,6 @@ Http::One::ParseBws(Tokenizer &tok)
+ }
+ // else we successfully "parsed" an empty BWS sequence
+
+- return true;
++ // success: no more BWS characters expected
+ }
+
+--- a/src/http/one/Parser.h
++++ b/src/http/one/Parser.h
+@@ -12,6 +12,7 @@
+ #include "anyp/ProtocolVersion.h"
+ #include "http/one/forward.h"
+ #include "http/StatusCode.h"
++#include "parser/forward.h"
+ #include "sbuf/SBuf.h"
+
+ namespace Http {
+@@ -40,6 +41,7 @@ class Parser : public RefCountable
+ {
+ public:
+ typedef SBuf::size_type size_type;
++ typedef ::Parser::Tokenizer Tokenizer;
+
+ Parser() : parseStatusCode(Http::scNone), parsingStage_(HTTP_PARSE_NONE), hackExpectsMime_(false) {}
+ virtual ~Parser() {}
+@@ -118,11 +120,11 @@ protected:
+ * detect and skip the CRLF or (if tolerant) LF line terminator
+ * consume from the tokenizer.
+ *
+- * throws if non-terminator is detected.
++ * \throws exception on bad or InsufficientInput.
+ * \retval true only if line terminator found.
+ * \retval false incomplete or missing line terminator, need more data.
+ */
+- bool skipLineTerminator(Http1::Tokenizer &tok) const;
++ void skipLineTerminator(Tokenizer &) const;
+
+ /**
+ * Scan to find the mime headers block for current message.
+@@ -159,8 +161,8 @@ private:
+ };
+
+ /// skips and, if needed, warns about RFC 7230 BWS ("bad" whitespace)
+-/// \returns true (always; unlike all the skip*() functions)
+-bool ParseBws(Tokenizer &tok);
++/// \throws InsufficientInput when the end of BWS cannot be confirmed
++void ParseBws(Parser::Tokenizer &);
+
+ /// the right debugs() level for logging HTTP violation messages
+ int ErrorLevel();
+--- a/src/http/one/RequestParser.cc
++++ b/src/http/one/RequestParser.cc
+@@ -9,8 +9,8 @@
+ #include "squid.h"
+ #include "Debug.h"
+ #include "http/one/RequestParser.h"
+-#include "http/one/Tokenizer.h"
+ #include "http/ProtocolVersion.h"
++#include "parser/Tokenizer.h"
+ #include "profiler/Profiler.h"
+ #include "SquidConfig.h"
+
+@@ -64,7 +64,7 @@ Http::One::RequestParser::skipGarbageLin
+ * RFC 7230 section 2.6, 3.1 and 3.5
+ */
+ bool
+-Http::One::RequestParser::parseMethodField(Http1::Tokenizer &tok)
++Http::One::RequestParser::parseMethodField(Tokenizer &tok)
+ {
+ // method field is a sequence of TCHAR.
+ // Limit to 32 characters to prevent overly long sequences of non-HTTP
+@@ -145,7 +145,7 @@ Http::One::RequestParser::RequestTargetC
+ }
+
+ bool
+-Http::One::RequestParser::parseUriField(Http1::Tokenizer &tok)
++Http::One::RequestParser::parseUriField(Tokenizer &tok)
+ {
+ /* Arbitrary 64KB URI upper length limit.
+ *
+@@ -178,7 +178,7 @@ Http::One::RequestParser::parseUriField(
+ }
+
+ bool
+-Http::One::RequestParser::parseHttpVersionField(Http1::Tokenizer &tok)
++Http::One::RequestParser::parseHttpVersionField(Tokenizer &tok)
+ {
+ static const SBuf http1p0("HTTP/1.0");
+ static const SBuf http1p1("HTTP/1.1");
+@@ -253,7 +253,7 @@ Http::One::RequestParser::skipDelimiter(
+
+ /// Parse CRs at the end of request-line, just before the terminating LF.
+ bool
+-Http::One::RequestParser::skipTrailingCrs(Http1::Tokenizer &tok)
++Http::One::RequestParser::skipTrailingCrs(Tokenizer &tok)
+ {
+ if (Config.onoff.relaxed_header_parser) {
+ (void)tok.skipAllTrailing(CharacterSet::CR); // optional; multiple OK
+@@ -289,12 +289,12 @@ Http::One::RequestParser::parseRequestFi
+ // Earlier, skipGarbageLines() took care of any leading LFs (if allowed).
+ // Now, the request line has to end at the first LF.
+ static const CharacterSet lineChars = CharacterSet::LF.complement("notLF");
+- ::Parser::Tokenizer lineTok(buf_);
++ Tokenizer lineTok(buf_);
+ if (!lineTok.prefix(line, lineChars) || !lineTok.skip('\n')) {
+ if (buf_.length() >= Config.maxRequestHeaderSize) {
+ /* who should we blame for our failure to parse this line? */
+
+- Http1::Tokenizer methodTok(buf_);
++ Tokenizer methodTok(buf_);
+ if (!parseMethodField(methodTok))
+ return -1; // blame a bad method (or its delimiter)
+
+@@ -308,7 +308,7 @@ Http::One::RequestParser::parseRequestFi
+ return 0;
+ }
+
+- Http1::Tokenizer tok(line);
++ Tokenizer tok(line);
+
+ if (!parseMethodField(tok))
+ return -1;
+--- a/src/http/one/RequestParser.h
++++ b/src/http/one/RequestParser.h
+@@ -54,11 +54,11 @@ private:
+ bool doParse(const SBuf &aBuf);
+
+ /* all these return false and set parseStatusCode on parsing failures */
+- bool parseMethodField(Http1::Tokenizer &);
+- bool parseUriField(Http1::Tokenizer &);
+- bool parseHttpVersionField(Http1::Tokenizer &);
++ bool parseMethodField(Tokenizer &);
++ bool parseUriField(Tokenizer &);
++ bool parseHttpVersionField(Tokenizer &);
+ bool skipDelimiter(const size_t count, const char *where);
+- bool skipTrailingCrs(Http1::Tokenizer &tok);
++ bool skipTrailingCrs(Tokenizer &tok);
+
+ bool http0() const {return !msgProtocol_.major;}
+ static const CharacterSet &RequestTargetCharacters();
+--- a/src/http/one/ResponseParser.cc
++++ b/src/http/one/ResponseParser.cc
+@@ -9,8 +9,8 @@
+ #include "squid.h"
+ #include "Debug.h"
+ #include "http/one/ResponseParser.h"
+-#include "http/one/Tokenizer.h"
+ #include "http/ProtocolVersion.h"
++#include "parser/Tokenizer.h"
+ #include "profiler/Profiler.h"
+ #include "SquidConfig.h"
+
+@@ -47,7 +47,7 @@ Http::One::ResponseParser::firstLineSize
+ // NP: we found the protocol version and consumed it already.
+ // just need the status code and reason phrase
+ int
+-Http::One::ResponseParser::parseResponseStatusAndReason(Http1::Tokenizer &tok, const CharacterSet &WspDelim)
++Http::One::ResponseParser::parseResponseStatusAndReason(Tokenizer &tok, const CharacterSet &WspDelim)
+ {
+ if (!completedStatus_) {
+ debugs(74, 9, "seek status-code in: " << tok.remaining().substr(0,10) << "...");
+@@ -87,14 +87,13 @@ Http::One::ResponseParser::parseResponse
+ static const CharacterSet phraseChars = CharacterSet::WSP + CharacterSet::VCHAR + CharacterSet::OBSTEXT;
+ (void)tok.prefix(reasonPhrase_, phraseChars); // optional, no error if missing
+ try {
+- if (skipLineTerminator(tok)) {
+- debugs(74, DBG_DATA, "parse remaining buf={length=" << tok.remaining().length() << ", data='" << tok.remaining() << "'}");
+- buf_ = tok.remaining(); // resume checkpoint
+- return 1;
+- }
++ skipLineTerminator(tok);
++ buf_ = tok.remaining(); // resume checkpoint
++ debugs(74, DBG_DATA, Raw("leftovers", buf_.rawContent(), buf_.length()));
++ return 1;
++ } catch (const InsufficientInput &) {
+ reasonPhrase_.clear();
+ return 0; // need more to be sure we have it all
+-
+ } catch (const std::exception &ex) {
+ debugs(74, 6, "invalid status-line: " << ex.what());
+ }
+@@ -119,7 +118,7 @@ Http::One::ResponseParser::parseResponse
+ int
+ Http::One::ResponseParser::parseResponseFirstLine()
+ {
+- Http1::Tokenizer tok(buf_);
++ Tokenizer tok(buf_);
+
+ const CharacterSet &WspDelim = DelimiterCharacters();
+
+--- a/src/http/one/ResponseParser.h
++++ b/src/http/one/ResponseParser.h
+@@ -43,7 +43,7 @@ public:
+
+ private:
+ int parseResponseFirstLine();
+- int parseResponseStatusAndReason(Http1::Tokenizer&, const CharacterSet &);
++ int parseResponseStatusAndReason(Tokenizer&, const CharacterSet &);
+
+ /// magic prefix for identifying ICY response messages
+ static const SBuf IcyMagic;
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -13,10 +13,13 @@
+ #include "http/one/Tokenizer.h"
+ #include "http/ProtocolVersion.h"
+ #include "MemBuf.h"
++#include "parser/Tokenizer.h"
+ #include "Parsing.h"
++#include "sbuf/Stream.h"
+ #include "SquidConfig.h"
+
+-Http::One::TeChunkedParser::TeChunkedParser()
++Http::One::TeChunkedParser::TeChunkedParser():
++ customExtensionValueParser(nullptr)
+ {
+ // chunked encoding only exists in HTTP/1.1
+ Http1::Parser::msgProtocol_ = Http::ProtocolVersion(1,1);
+@@ -31,7 +34,11 @@ Http::One::TeChunkedParser::clear()
+ buf_.clear();
+ theChunkSize = theLeftBodySize = 0;
+ theOut = NULL;
+- useOriginBody = -1;
++ // XXX: We do not reset customExtensionValueParser here. Based on the
++ // clear() API description, we must, but it makes little sense and could
++ // break method callers if they appear because some of them may forget to
++ // reset customExtensionValueParser. TODO: Remove Http1::Parser as our
++ // parent class and this unnecessary method with it.
+ }
+
+ bool
+@@ -49,14 +56,14 @@ Http::One::TeChunkedParser::parse(const
+ if (parsingStage_ == Http1::HTTP_PARSE_NONE)
+ parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
+
+- Http1::Tokenizer tok(buf_);
++ Tokenizer tok(buf_);
+
+ // loop for as many chunks as we can
+ // use do-while instead of while so that we can incrementally
+ // restart in the middle of a chunk/frame
+ do {
+
+- if (parsingStage_ == Http1::HTTP_PARSE_CHUNK_EXT && !parseChunkExtension(tok, theChunkSize))
++ if (parsingStage_ == Http1::HTTP_PARSE_CHUNK_EXT && !parseChunkMetadataSuffix(tok))
+ return false;
+
+ if (parsingStage_ == Http1::HTTP_PARSE_CHUNK && !parseChunkBody(tok))
+@@ -80,7 +87,7 @@ Http::One::TeChunkedParser::needsMoreSpa
+
+ /// RFC 7230 section 4.1 chunk-size
+ bool
+-Http::One::TeChunkedParser::parseChunkSize(Http1::Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkSize(Tokenizer &tok)
+ {
+ Must(theChunkSize <= 0); // Should(), really
+
+@@ -104,66 +111,75 @@ Http::One::TeChunkedParser::parseChunkSi
+ return false; // should not be reachable
+ }
+
+-/**
+- * Parses chunk metadata suffix, looking for interesting extensions and/or
+- * getting to the line terminator. RFC 7230 section 4.1.1 and its Errata #4667:
+- *
+- * chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+- * chunk-ext-name = token
+- * chunk-ext-val = token / quoted-string
+- *
+- * ICAP 'use-original-body=N' extension is supported.
+- */
+-bool
+-Http::One::TeChunkedParser::parseChunkExtension(Http1::Tokenizer &tok, bool skipKnown)
+-{
+- SBuf ext;
+- SBuf value;
+- while (
+- ParseBws(tok) && // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+- tok.skip(';') &&
+- ParseBws(tok) && // Bug 4492: ICAP servers send SP before chunk-ext-name
+- tok.prefix(ext, CharacterSet::TCHAR)) { // chunk-ext-name
+-
+- // whole value part is optional. if no '=' expect next chunk-ext
+- if (ParseBws(tok) && tok.skip('=') && ParseBws(tok)) {
+-
+- if (!skipKnown) {
+- if (ext.cmp("use-original-body",17) == 0 && tok.int64(useOriginBody, 10)) {
+- debugs(94, 3, "Found chunk extension " << ext << "=" << useOriginBody);
+- buf_ = tok.remaining(); // parse checkpoint
+- continue;
+- }
+- }
+-
+- debugs(94, 5, "skipping unknown chunk extension " << ext);
+-
+- // unknown might have a value token or quoted-string
+- if (tok.quotedStringOrToken(value) && !tok.atEnd()) {
+- buf_ = tok.remaining(); // parse checkpoint
+- continue;
+- }
+-
+- // otherwise need more data OR corrupt syntax
+- break;
+- }
+-
+- if (!tok.atEnd())
+- buf_ = tok.remaining(); // parse checkpoint (unless there might be more token name)
+- }
+-
+- if (skipLineTerminator(tok)) {
+- buf_ = tok.remaining(); // checkpoint
+- // non-0 chunk means data, 0-size means optional Trailer follows
++/// Parses "[chunk-ext] CRLF" from RFC 7230 section 4.1.1:
++/// chunk = chunk-size [ chunk-ext ] CRLF chunk-data CRLF
++/// last-chunk = 1*"0" [ chunk-ext ] CRLF
++bool
++Http::One::TeChunkedParser::parseChunkMetadataSuffix(Tokenizer &tok)
++{
++ // Code becomes much simpler when incremental parsing functions throw on
++ // bad or insufficient input, like in the code below. TODO: Expand up.
++ try {
++ parseChunkExtensions(tok); // a possibly empty chunk-ext list
++ skipLineTerminator(tok);
++ buf_ = tok.remaining();
+ parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
+ return true;
++ } catch (const InsufficientInput &) {
++ tok.reset(buf_); // backtrack to the last commit point
++ return false;
+ }
++ // other exceptions bubble up to kill message parsing
++}
++
++/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
++/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
++void
++Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
++{
++ do {
++ ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+
+- return false;
++ if (!tok.skip(';'))
++ return; // reached the end of extensions (if any)
++
++ parseOneChunkExtension(tok);
++ buf_ = tok.remaining(); // got one extension
++ } while (true);
++}
++
++void
++Http::One::ChunkExtensionValueParser::Ignore(Tokenizer &tok, const SBuf &extName)
++{
++ const auto ignoredValue = tokenOrQuotedString(tok);
++ debugs(94, 5, extName << " with value " << ignoredValue);
++}
++
++/// Parses a single chunk-ext list element:
++/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
++void
++Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
++{
++ ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name
++
++ const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
++
++ ParseBws(tok);
++
++ if (!tok.skip('='))
++ return; // parsed a valueless chunk-ext
++
++ ParseBws(tok);
++
++ // optimization: the only currently supported extension needs last-chunk
++ if (!theChunkSize && customExtensionValueParser)
++ customExtensionValueParser->parse(tok, extName);
++ else
++ ChunkExtensionValueParser::Ignore(tok, extName);
+ }
+
+ bool
+-Http::One::TeChunkedParser::parseChunkBody(Http1::Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkBody(Tokenizer &tok)
+ {
+ if (theLeftBodySize > 0) {
+ buf_ = tok.remaining(); // sync buffers before buf_ use
+@@ -188,17 +204,20 @@ Http::One::TeChunkedParser::parseChunkBo
+ }
+
+ bool
+-Http::One::TeChunkedParser::parseChunkEnd(Http1::Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkEnd(Tokenizer &tok)
+ {
+ Must(theLeftBodySize == 0); // Should(), really
+
+- if (skipLineTerminator(tok)) {
++ try {
++ skipLineTerminator(tok);
+ buf_ = tok.remaining(); // parse checkpoint
+ theChunkSize = 0; // done with the current chunk
+ parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
+ return true;
+ }
+-
+- return false;
++ catch (const InsufficientInput &) {
++ return false;
++ }
++ // other exceptions bubble up to kill message parsing
+ }
+
+--- a/src/http/one/TeChunkedParser.h
++++ b/src/http/one/TeChunkedParser.h
+@@ -18,6 +18,26 @@ namespace Http
+ namespace One
+ {
+
++using ::Parser::InsufficientInput;
++
++// TODO: Move this class into http/one/ChunkExtensionValueParser.*
++/// A customizable parser of a single chunk extension value (chunk-ext-val).
++/// From RFC 7230 section 4.1.1 and its Errata #4667:
++/// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
++/// chunk-ext-name = token
++/// chunk-ext-val = token / quoted-string
++class ChunkExtensionValueParser
++{
++public:
++ typedef ::Parser::Tokenizer Tokenizer;
++
++ /// extracts and ignores the value of a named extension
++ static void Ignore(Tokenizer &tok, const SBuf &extName);
++
++ /// extracts and then interprets (or ignores) the extension value
++ virtual void parse(Tokenizer &tok, const SBuf &extName) = 0;
++};
++
+ /**
+ * An incremental parser for chunked transfer coding
+ * defined in RFC 7230 section 4.1.
+@@ -25,7 +45,7 @@ namespace One
+ *
+ * The parser shovels content bytes from the raw
+ * input buffer into the content output buffer, both caller-supplied.
+- * Ignores chunk extensions except for ICAP's ieof.
++ * Chunk extensions like use-original-body are handled via parseExtensionValuesWith().
+ * Trailers are available via mimeHeader() if wanted.
+ */
+ class TeChunkedParser : public Http1::Parser
+@@ -37,6 +57,10 @@ public:
+ /// set the buffer to be used to store decoded chunk data
+ void setPayloadBuffer(MemBuf *parsedContent) {theOut = parsedContent;}
+
++ /// Instead of ignoring all chunk extension values, give the supplied
++ /// parser a chance to handle them. Only applied to last-chunk (for now).
++ void parseExtensionValuesWith(ChunkExtensionValueParser *parser) { customExtensionValueParser = parser; }
++
+ bool needsMoreSpace() const;
+
+ /* Http1::Parser API */
+@@ -45,17 +69,20 @@ public:
+ virtual Parser::size_type firstLineSize() const {return 0;} // has no meaning with multiple chunks
+
+ private:
+- bool parseChunkSize(Http1::Tokenizer &tok);
+- bool parseChunkExtension(Http1::Tokenizer &tok, bool skipKnown);
+- bool parseChunkBody(Http1::Tokenizer &tok);
+- bool parseChunkEnd(Http1::Tokenizer &tok);
++ bool parseChunkSize(Tokenizer &tok);
++ bool parseChunkMetadataSuffix(Tokenizer &);
++ void parseChunkExtensions(Tokenizer &);
++ void parseOneChunkExtension(Tokenizer &);
++ bool parseChunkBody(Tokenizer &tok);
++ bool parseChunkEnd(Tokenizer &tok);
+
+ MemBuf *theOut;
+ uint64_t theChunkSize;
+ uint64_t theLeftBodySize;
+
+-public:
+- int64_t useOriginBody;
++ /// An optional plugin for parsing and interpreting custom chunk-ext-val.
++ /// This "visitor" object is owned by our creator.
++ ChunkExtensionValueParser *customExtensionValueParser;
+ };
+
+ } // namespace One
+--- a/src/http/one/Tokenizer.cc
++++ b/src/http/one/Tokenizer.cc
+@@ -8,35 +8,18 @@
+
+ #include "squid.h"
+ #include "Debug.h"
++#include "http/one/Parser.h"
+ #include "http/one/Tokenizer.h"
++#include "parser/Tokenizer.h"
++#include "sbuf/Stream.h"
+
+-bool
+-Http::One::Tokenizer::quotedString(SBuf &returnedToken, const bool http1p0)
++/// Extracts quoted-string after the caller removes the initial '"'.
++/// \param http1p0 whether to prohibit \-escaped characters in quoted strings
++/// \throws InsufficientInput when input can be a token _prefix_
++/// \returns extracted quoted string (without quotes and with chars unescaped)
++static SBuf
++parseQuotedStringSuffix(Parser::Tokenizer &tok, const bool http1p0)
+ {
+- checkpoint();
+-
+- if (!skip('"'))
+- return false;
+-
+- return qdText(returnedToken, http1p0);
+-}
+-
+-bool
+-Http::One::Tokenizer::quotedStringOrToken(SBuf &returnedToken, const bool http1p0)
+-{
+- checkpoint();
+-
+- if (!skip('"'))
+- return prefix(returnedToken, CharacterSet::TCHAR);
+-
+- return qdText(returnedToken, http1p0);
+-}
+-
+-bool
+-Http::One::Tokenizer::qdText(SBuf &returnedToken, const bool http1p0)
+-{
+- // the initial DQUOTE has been skipped by the caller
+-
+ /*
+ * RFC 1945 - defines qdtext:
+ * inclusive of LWS (which includes CR and LF)
+@@ -61,12 +44,17 @@ Http::One::Tokenizer::qdText(SBuf &retur
+ // best we can do is a conditional reference since http1p0 value may change per-client
+ const CharacterSet &tokenChars = (http1p0 ? qdtext1p0 : qdtext1p1);
+
+- for (;;) {
+- SBuf::size_type prefixLen = buf().findFirstNotOf(tokenChars);
+- returnedToken.append(consume(prefixLen));
++ SBuf parsedToken;
++
++ while (!tok.atEnd()) {
++ SBuf qdText;
++ if (tok.prefix(qdText, tokenChars))
++ parsedToken.append(qdText);
++
++ if (!http1p0 && tok.skip('\\')) { // HTTP/1.1 allows quoted-pair, HTTP/1.0 does not
++ if (tok.atEnd())
++ break;
+
+- // HTTP/1.1 allows quoted-pair, HTTP/1.0 does not
+- if (!http1p0 && skip('\\')) {
+ /* RFC 7230 section 3.2.6
+ *
+ * The backslash octet ("\") can be used as a single-octet quoting
+@@ -78,32 +66,42 @@ Http::One::Tokenizer::qdText(SBuf &retur
+ */
+ static const CharacterSet qPairChars = CharacterSet::HTAB + CharacterSet::SP + CharacterSet::VCHAR + CharacterSet::OBSTEXT;
+ SBuf escaped;
+- if (!prefix(escaped, qPairChars, 1)) {
+- returnedToken.clear();
+- restoreLastCheckpoint();
+- return false;
+- }
+- returnedToken.append(escaped);
++ if (!tok.prefix(escaped, qPairChars, 1))
++ throw TexcHere("invalid escaped character in quoted-pair");
++
++ parsedToken.append(escaped);
+ continue;
++ }
+
+- } else if (skip('"')) {
+- break; // done
++ if (tok.skip('"'))
++ return parsedToken; // may be empty
+
+- } else if (atEnd()) {
+- // need more data
+- returnedToken.clear();
+- restoreLastCheckpoint();
+- return false;
+- }
++ if (tok.atEnd())
++ break;
+
+- // else, we have an error
+- debugs(24, 8, "invalid bytes for set " << tokenChars.name);
+- returnedToken.clear();
+- restoreLastCheckpoint();
+- return false;
++ throw TexcHere(ToSBuf("invalid bytes for set ", tokenChars.name));
+ }
+
+- // found the whole string
+- return true;
++ throw Http::One::InsufficientInput();
++}
++
++SBuf
++Http::One::tokenOrQuotedString(Parser::Tokenizer &tok, const bool http1p0)
++{
++ if (tok.skip('"'))
++ return parseQuotedStringSuffix(tok, http1p0);
++
++ if (tok.atEnd())
++ throw InsufficientInput();
++
++ SBuf parsedToken;
++ if (!tok.prefix(parsedToken, CharacterSet::TCHAR))
++ throw TexcHere("invalid input while expecting an HTTP token");
++
++ if (tok.atEnd())
++ throw InsufficientInput();
++
++ // got the complete token
++ return parsedToken;
+ }
+
+--- a/src/http/one/Tokenizer.h
++++ b/src/http/one/Tokenizer.h
+@@ -9,68 +9,47 @@
+ #ifndef SQUID_SRC_HTTP_ONE_TOKENIZER_H
+ #define SQUID_SRC_HTTP_ONE_TOKENIZER_H
+
+-#include "parser/Tokenizer.h"
++#include "parser/forward.h"
++#include "sbuf/forward.h"
+
+ namespace Http {
+ namespace One {
+
+ /**
+- * Lexical processor extended to tokenize HTTP/1.x syntax.
++ * Extracts either an HTTP/1 token or quoted-string while dealing with
++ * possibly incomplete input typical for incremental text parsers.
++ * Unescapes escaped characters in HTTP/1.1 quoted strings.
+ *
+- * \see ::Parser::Tokenizer for more detail
++ * \param http1p0 whether to prohibit \-escaped characters in quoted strings
++ * \throws InsufficientInput as appropriate, including on unterminated tokens
++ * \returns extracted token or quoted string (without quotes)
++ *
++ * Governed by:
++ * - RFC 1945 section 2.1
++ * "
++ * A string of text is parsed as a single word if it is quoted using
++ * double-quote marks.
++ *
++ * quoted-string = ( <"> *(qdtext) <"> )
++ *
++ * qdtext = <any CHAR except <"> and CTLs,
++ * but including LWS>
++ *
++ * Single-character quoting using the backslash ("\") character is not
++ * permitted in HTTP/1.0.
++ * "
++ *
++ * - RFC 7230 section 3.2.6
++ * "
++ * A string of text is parsed as a single value if it is quoted using
++ * double-quote marks.
++ *
++ * quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
++ * qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text
++ * obs-text = %x80-FF
++ * "
+ */
+-class Tokenizer : public ::Parser::Tokenizer
+-{
+-public:
+- Tokenizer(SBuf &s) : ::Parser::Tokenizer(s), savedStats_(0) {}
+-
+- /**
+- * Attempt to parse a quoted-string lexical construct.
+- *
+- * Governed by:
+- * - RFC 1945 section 2.1
+- * "
+- * A string of text is parsed as a single word if it is quoted using
+- * double-quote marks.
+- *
+- * quoted-string = ( <"> *(qdtext) <"> )
+- *
+- * qdtext = <any CHAR except <"> and CTLs,
+- * but including LWS>
+- *
+- * Single-character quoting using the backslash ("\") character is not
+- * permitted in HTTP/1.0.
+- * "
+- *
+- * - RFC 7230 section 3.2.6
+- * "
+- * A string of text is parsed as a single value if it is quoted using
+- * double-quote marks.
+- *
+- * quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
+- * qdtext = HTAB / SP /%x21 / %x23-5B / %x5D-7E / obs-text
+- * obs-text = %x80-FF
+- * "
+- *
+- * \param escaped HTTP/1.0 does not permit \-escaped characters
+- */
+- bool quotedString(SBuf &value, const bool http1p0 = false);
+-
+- /**
+- * Attempt to parse a (token / quoted-string ) lexical construct.
+- */
+- bool quotedStringOrToken(SBuf &value, const bool http1p0 = false);
+-
+-private:
+- /// parse the internal component of a quote-string, and terminal DQUOTE
+- bool qdText(SBuf &value, const bool http1p0);
+-
+- void checkpoint() { savedCheckpoint_ = buf(); savedStats_ = parsedSize(); }
+- void restoreLastCheckpoint() { undoParse(savedCheckpoint_, savedStats_); }
+-
+- SBuf savedCheckpoint_;
+- SBuf::size_type savedStats_;
+-};
++SBuf tokenOrQuotedString(Parser::Tokenizer &tok, const bool http1p0 = false);
+
+ } // namespace One
+ } // namespace Http
+--- a/src/http/one/forward.h
++++ b/src/http/one/forward.h
+@@ -10,6 +10,7 @@
+ #define SQUID_SRC_HTTP_ONE_FORWARD_H
+
+ #include "base/RefCount.h"
++#include "parser/forward.h"
+ #include "sbuf/forward.h"
+
+ namespace Http {
+@@ -31,6 +32,8 @@ typedef RefCount<Http::One::ResponsePars
+ /// CRLF textual representation
+ const SBuf &CrLf();
+
++using ::Parser::InsufficientInput;
++
+ } // namespace One
+ } // namespace Http
+
+--- a/src/parser/BinaryTokenizer.h
++++ b/src/parser/BinaryTokenizer.h
+@@ -9,6 +9,7 @@
+ #ifndef SQUID_SRC_PARSER_BINARYTOKENIZER_H
+ #define SQUID_SRC_PARSER_BINARYTOKENIZER_H
+
++#include "parser/forward.h"
+ #include "sbuf/SBuf.h"
+
+ namespace Parser
+@@ -44,7 +45,7 @@ public:
+ class BinaryTokenizer
+ {
+ public:
+- class InsufficientInput {}; // thrown when a method runs out of data
++ typedef ::Parser::InsufficientInput InsufficientInput;
+ typedef uint64_t size_type; // enough for the largest supported offset
+
+ BinaryTokenizer();
+--- a/src/parser/Makefile.am
++++ b/src/parser/Makefile.am
+@@ -13,6 +13,7 @@ noinst_LTLIBRARIES = libparser.la
+ libparser_la_SOURCES = \
+ BinaryTokenizer.h \
+ BinaryTokenizer.cc \
++ forward.h \
+ Tokenizer.h \
+ Tokenizer.cc
+
+--- a/src/parser/Tokenizer.cc
++++ b/src/parser/Tokenizer.cc
+@@ -10,7 +10,9 @@
+
+ #include "squid.h"
+ #include "Debug.h"
++#include "parser/forward.h"
+ #include "parser/Tokenizer.h"
++#include "sbuf/Stream.h"
+
+ #include <cerrno>
+ #if HAVE_CTYPE_H
+@@ -96,6 +98,23 @@ Parser::Tokenizer::prefix(SBuf &returned
+ return true;
+ }
+
++SBuf
++Parser::Tokenizer::prefix(const char *description, const CharacterSet &tokenChars, const SBuf::size_type limit)
++{
++ if (atEnd())
++ throw InsufficientInput();
++
++ SBuf result;
++
++ if (!prefix(result, tokenChars, limit))
++ throw TexcHere(ToSBuf("cannot parse ", description));
++
++ if (atEnd())
++ throw InsufficientInput();
++
++ return result;
++}
++
+ bool
+ Parser::Tokenizer::suffix(SBuf &returnedToken, const CharacterSet &tokenChars, const SBuf::size_type limit)
+ {
+@@ -283,3 +302,24 @@ Parser::Tokenizer::int64(int64_t & resul
+ return success(s - range.rawContent());
+ }
+
++int64_t
++Parser::Tokenizer::udec64(const char *description, const SBuf::size_type limit)
++{
++ if (atEnd())
++ throw InsufficientInput();
++
++ int64_t result = 0;
++
++ // Since we only support unsigned decimals, a parsing failure with a
++ // non-empty input always implies invalid/malformed input (or a buggy
++ // limit=0 caller). TODO: Support signed and non-decimal integers by
++ // refactoring int64() to detect insufficient input.
++ if (!int64(result, 10, false, limit))
++ throw TexcHere(ToSBuf("cannot parse ", description));
++
++ if (atEnd())
++ throw InsufficientInput(); // more digits may be coming
++
++ return result;
++}
++
+--- a/src/parser/Tokenizer.h
++++ b/src/parser/Tokenizer.h
+@@ -143,6 +143,19 @@ public:
+ */
+ bool int64(int64_t &result, int base = 0, bool allowSign = true, SBuf::size_type limit = SBuf::npos);
+
++ /*
++ * The methods below mimic their counterparts documented above, but they
++ * throw on errors, including InsufficientInput. The field description
++ * parameter is used for error reporting and debugging.
++ */
++
++ /// prefix() wrapper but throws InsufficientInput if input contains
++ /// nothing but the prefix (i.e. if the prefix is not "terminated")
++ SBuf prefix(const char *description, const CharacterSet &tokenChars, SBuf::size_type limit = SBuf::npos);
++
++ /// int64() wrapper but limited to unsigned decimal integers (for now)
++ int64_t udec64(const char *description, SBuf::size_type limit = SBuf::npos);
++
+ protected:
+ SBuf consume(const SBuf::size_type n);
+ SBuf::size_type success(const SBuf::size_type n);
+--- /dev/null
++++ b/src/parser/forward.h
+@@ -0,0 +1,22 @@
++/*
++ * Copyright (C) 1996-2019 The Squid Software Foundation and contributors
++ *
++ * Squid software is distributed under GPLv2+ license and includes
++ * contributions from numerous individuals and organizations.
++ * Please see the COPYING and CONTRIBUTORS files for details.
++ */
++
++#ifndef SQUID_PARSER_FORWARD_H
++#define SQUID_PARSER_FORWARD_H
++
++namespace Parser {
++class Tokenizer;
++class BinaryTokenizer;
++
++// TODO: Move this declaration (to parser/Elements.h) if we need more like it.
++/// thrown by modern "incremental" parsers when they need more data
++class InsufficientInput {};
++} // namespace Parser
++
++#endif /* SQUID_PARSER_FORWARD_H */
++
diff --git a/meta-networking/recipes-daemons/squid/files/CVE-2023-46846.patch b/meta-networking/recipes-daemons/squid/files/CVE-2023-46846.patch
new file mode 100644
index 0000000000..a6d0965e7a
--- /dev/null
+++ b/meta-networking/recipes-daemons/squid/files/CVE-2023-46846.patch
@@ -0,0 +1,169 @@
+From 05f6af2f4c85cc99323cfff6149c3d74af661b6d Mon Sep 17 00:00:00 2001
+From: Amos Jeffries <yadij@users.noreply.github.com>
+Date: Fri, 13 Oct 2023 08:44:16 +0000
+Subject: [PATCH] RFC 9112: Improve HTTP chunked encoding compliance (#1498)
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/squid/tree/debian/patches/CVE-2023-46846.patch?h=ubuntu/focal-security&id=9ccd217ca9428c9a6597e9310a99552026b245fa
+Upstream commit https://github.com/squid-cache/squid/commit/05f6af2f4c85cc99323cfff6149c3d74af661b6d]
+CVE: CVE-2023-46846
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/http/one/Parser.cc | 8 +-------
+ src/http/one/Parser.h | 4 +---
+ src/http/one/TeChunkedParser.cc | 23 ++++++++++++++++++-----
+ src/parser/Tokenizer.cc | 12 ++++++++++++
+ src/parser/Tokenizer.h | 7 +++++++
+ 5 files changed, 39 insertions(+), 15 deletions(-)
+
+--- a/src/http/one/Parser.cc
++++ b/src/http/one/Parser.cc
+@@ -65,16 +65,10 @@ Http::One::Parser::DelimiterCharacters()
+ void
+ Http::One::Parser::skipLineTerminator(Tokenizer &tok) const
+ {
+- if (tok.skip(Http1::CrLf()))
+- return;
+-
+ if (Config.onoff.relaxed_header_parser && tok.skipOne(CharacterSet::LF))
+ return;
+
+- if (tok.atEnd() || (tok.remaining().length() == 1 && tok.remaining().at(0) == '\r'))
+- throw InsufficientInput();
+-
+- throw TexcHere("garbage instead of CRLF line terminator");
++ tok.skipRequired("line-terminating CRLF", Http1::CrLf());
+ }
+
+ /// all characters except the LF line terminator
+--- a/src/http/one/Parser.h
++++ b/src/http/one/Parser.h
+@@ -120,9 +120,7 @@ protected:
+ * detect and skip the CRLF or (if tolerant) LF line terminator
+ * consume from the tokenizer.
+ *
+- * \throws exception on bad or InsuffientInput.
+- * \retval true only if line terminator found.
+- * \retval false incomplete or missing line terminator, need more data.
++ * \throws exception on bad or InsufficientInput
+ */
+ void skipLineTerminator(Tokenizer &) const;
+
+--- a/src/http/one/TeChunkedParser.cc
++++ b/src/http/one/TeChunkedParser.cc
+@@ -91,6 +91,11 @@ Http::One::TeChunkedParser::parseChunkSi
+ {
+ Must(theChunkSize <= 0); // Should(), really
+
++ static const SBuf bannedHexPrefixLower("0x");
++ static const SBuf bannedHexPrefixUpper("0X");
++ if (tok.skip(bannedHexPrefixLower) || tok.skip(bannedHexPrefixUpper))
++ throw TextException("chunk starts with 0x", Here());
++
+ int64_t size = -1;
+ if (tok.int64(size, 16, false) && !tok.atEnd()) {
+ if (size < 0)
+@@ -121,7 +126,7 @@ Http::One::TeChunkedParser::parseChunkMe
+ // bad or insufficient input, like in the code below. TODO: Expand up.
+ try {
+ parseChunkExtensions(tok); // a possibly empty chunk-ext list
+- skipLineTerminator(tok);
++ tok.skipRequired("CRLF after [chunk-ext]", Http1::CrLf());
+ buf_ = tok.remaining();
+ parsingStage_ = theChunkSize ? Http1::HTTP_PARSE_CHUNK : Http1::HTTP_PARSE_MIME;
+ return true;
+@@ -132,12 +137,14 @@ Http::One::TeChunkedParser::parseChunkMe
+ // other exceptions bubble up to kill message parsing
+ }
+
+-/// Parses the chunk-ext list (RFC 7230 section 4.1.1 and its Errata #4667):
++/// Parses the chunk-ext list (RFC 9112 section 7.1.1:
+ /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+ void
+-Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &tok)
++Http::One::TeChunkedParser::parseChunkExtensions(Tokenizer &callerTok)
+ {
+ do {
++ auto tok = callerTok;
++
+ ParseBws(tok); // Bug 4492: IBM_HTTP_Server sends SP after chunk-size
+
+ if (!tok.skip(';'))
+@@ -145,6 +152,7 @@ Http::One::TeChunkedParser::parseChunkEx
+
+ parseOneChunkExtension(tok);
+ buf_ = tok.remaining(); // got one extension
++ callerTok = tok;
+ } while (true);
+ }
+
+@@ -158,11 +166,14 @@ Http::One::ChunkExtensionValueParser::Ig
+ /// Parses a single chunk-ext list element:
+ /// chunk-ext = *( BWS ";" BWS chunk-ext-name [ BWS "=" BWS chunk-ext-val ] )
+ void
+-Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &tok)
++Http::One::TeChunkedParser::parseOneChunkExtension(Tokenizer &callerTok)
+ {
++ auto tok = callerTok;
++
+ ParseBws(tok); // Bug 4492: ICAP servers send SP before chunk-ext-name
+
+ const auto extName = tok.prefix("chunk-ext-name", CharacterSet::TCHAR);
++ callerTok = tok; // in case we determine that this is a valueless chunk-ext
+
+ ParseBws(tok);
+
+@@ -176,6 +187,8 @@ Http::One::TeChunkedParser::parseOneChun
+ customExtensionValueParser->parse(tok, extName);
+ else
+ ChunkExtensionValueParser::Ignore(tok, extName);
++
++ callerTok = tok;
+ }
+
+ bool
+@@ -209,7 +222,7 @@ Http::One::TeChunkedParser::parseChunkEn
+ Must(theLeftBodySize == 0); // Should(), really
+
+ try {
+- skipLineTerminator(tok);
++ tok.skipRequired("chunk CRLF", Http1::CrLf());
+ buf_ = tok.remaining(); // parse checkpoint
+ theChunkSize = 0; // done with the current chunk
+ parsingStage_ = Http1::HTTP_PARSE_CHUNK_SZ;
+--- a/src/parser/Tokenizer.cc
++++ b/src/parser/Tokenizer.cc
+@@ -147,6 +147,18 @@ Parser::Tokenizer::skipAll(const Charact
+ return success(prefixLen);
+ }
+
++void
++Parser::Tokenizer::skipRequired(const char *description, const SBuf &tokenToSkip)
++{
++ if (skip(tokenToSkip) || tokenToSkip.isEmpty())
++ return;
++
++ if (tokenToSkip.startsWith(buf_))
++ throw InsufficientInput();
++
++ throw TextException(ToSBuf("cannot skip ", description), Here());
++}
++
+ bool
+ Parser::Tokenizer::skipOne(const CharacterSet &chars)
+ {
+--- a/src/parser/Tokenizer.h
++++ b/src/parser/Tokenizer.h
+@@ -115,6 +115,13 @@ public:
+ */
+ SBuf::size_type skipAll(const CharacterSet &discardables);
+
++ /** skips a given character sequence (string);
++ * does nothing if the sequence is empty
++ *
++ * \throws exception on mismatching prefix or InsufficientInput
++ */
++ void skipRequired(const char *description, const SBuf &tokenToSkip);
++
+ /** Removes a single trailing character from the set.
+ *
+ * \return whether a character was removed
diff --git a/meta-networking/recipes-daemons/squid/files/CVE-2023-46847.patch b/meta-networking/recipes-daemons/squid/files/CVE-2023-46847.patch
new file mode 100644
index 0000000000..9071872c01
--- /dev/null
+++ b/meta-networking/recipes-daemons/squid/files/CVE-2023-46847.patch
@@ -0,0 +1,47 @@
+From 052cf082b0faaef4eaaa4e94119d7a1437aac4a3 Mon Sep 17 00:00:00 2001
+From: squidadm <squidadm@users.noreply.github.com>
+Date: Wed, 18 Oct 2023 04:50:56 +1300
+Subject: [PATCH] Fix stack buffer overflow when parsing Digest Authorization
+ (#1517)
+
+The bug was discovered and detailed by Joshua Rogers at
+https://megamansec.github.io/Squid-Security-Audit/digest-overflow.html
+where it was filed as "Stack Buffer Overflow in Digest Authentication".
+
+---------
+
+Co-authored-by: Alex Bason <nonsleepr@gmail.com>
+Co-authored-by: Amos Jeffries <yadij@users.noreply.github.com>
+
+Upstream-Status: Backport [https://github.com/squid-cache/squid/commit/052cf082b0faaef4eaaa4e94119d7a1437aac4a3]
+CVE: CVE-2023-46847
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ src/auth/digest/Config.cc | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/src/auth/digest/Config.cc b/src/auth/digest/Config.cc
+index 6a9736f..0a883fa 100644
+--- a/src/auth/digest/Config.cc
++++ b/src/auth/digest/Config.cc
+@@ -847,11 +847,15 @@ Auth::Digest::Config::decode(char const *proxy_auth, const char *aRequestRealm)
+ break;
+
+ case DIGEST_NC:
+- if (value.size() != 8) {
++ if (value.size() == 8) {
++ // for historical reasons, the nc value MUST be exactly 8 bytes
++ static_assert(sizeof(digest_request->nc) == 8 + 1, "bad nc buffer size");
++ xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
++ debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
++ } else {
+ debugs(29, 9, "Invalid nc '" << value << "' in '" << temp << "'");
++ digest_request->nc[0] = 0;
+ }
+- xstrncpy(digest_request->nc, value.rawBuf(), value.size() + 1);
+- debugs(29, 9, "Found noncecount '" << digest_request->nc << "'");
+ break;
+
+ case DIGEST_CNONCE:
+--
+2.40.1
diff --git a/meta-networking/recipes-daemons/squid/files/CVE-2023-49285.patch b/meta-networking/recipes-daemons/squid/files/CVE-2023-49285.patch
new file mode 100644
index 0000000000..6909f754f3
--- /dev/null
+++ b/meta-networking/recipes-daemons/squid/files/CVE-2023-49285.patch
@@ -0,0 +1,37 @@
+From 77b3fb4df0f126784d5fd4967c28ed40eb8d521b Mon Sep 17 00:00:00 2001
+From: Alex Rousskov <rousskov@measurement-factory.com>
+Date: Wed, 25 Oct 2023 19:41:45 +0000
+Subject: [PATCH] RFC 1123: Fix date parsing (#1538)
+
+The bug was discovered and detailed by Joshua Rogers at
+https://megamansec.github.io/Squid-Security-Audit/datetime-overflow.html
+where it was filed as "1-Byte Buffer OverRead in RFC 1123 date/time
+Handling".
+
+Upstream-Status: Backport [https://github.com/squid-cache/squid/commit/77b3fb4df0f126784d5fd4967c28ed40eb8d521b]
+CVE: CVE-2023-49285
+Signed-off-by: Vivek Kumbhar <vkumbhar@mvista.com>
+---
+ lib/rfc1123.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/lib/rfc1123.c b/lib/rfc1123.c
+index 2d889cc..add63f0 100644
+--- a/lib/rfc1123.c
++++ b/lib/rfc1123.c
+@@ -50,7 +50,13 @@ make_month(const char *s)
+ char month[3];
+
+ month[0] = xtoupper(*s);
++ if (!month[0])
++ return -1; // protects *(s + 1) below
++
+ month[1] = xtolower(*(s + 1));
++ if (!month[1])
++ return -1; // protects *(s + 2) below
++
+ month[2] = xtolower(*(s + 2));
+
+ for (i = 0; i < 12; i++)
+--
+2.39.3
diff --git a/meta-networking/recipes-daemons/squid/files/CVE-2023-49286.patch b/meta-networking/recipes-daemons/squid/files/CVE-2023-49286.patch
new file mode 100644
index 0000000000..8e0bdf387c
--- /dev/null
+++ b/meta-networking/recipes-daemons/squid/files/CVE-2023-49286.patch
@@ -0,0 +1,87 @@
+From 6014c6648a2a54a4ecb7f952ea1163e0798f9264 Mon Sep 17 00:00:00 2001
+From: Alex Rousskov <rousskov@measurement-factory.com>
+Date: Fri, 27 Oct 2023 21:27:20 +0000
+Subject: [PATCH] Exit without asserting when helper process startup fails
+ (#1543)
+
+... to dup() after fork() and before execvp().
+
+Assertions are for handling program logic errors. Helper initialization
+code already handled system call errors correctly (i.e. by exiting the
+newly created helper process with an error), except for a couple of
+assert()s that could be triggered by dup(2) failures.
+
+This bug was discovered and detailed by Joshua Rogers at
+https://megamansec.github.io/Squid-Security-Audit/ipc-assert.html
+where it was filed as 'Assertion in Squid "Helper" Process Creator'.
+
+Origin: http://www.squid-cache.org/Versions/v6/SQUID-2023_8.patch
+
+Upstream-Status: Backport [https://github.com/squid-cache/squid/commit/6014c6648a2a54a4ecb7f952ea1163e0798f9264]
+CVE: CVE-2023-49286
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/ipc.cc | 33 +++++++++++++++++++++++++++------
+ 1 file changed, 27 insertions(+), 6 deletions(-)
+
+--- a/src/ipc.cc
++++ b/src/ipc.cc
+@@ -20,6 +20,12 @@
+ #include "SquidIpc.h"
+ #include "tools.h"
+
++#include <cstdlib>
++
++#if HAVE_UNISTD_H
++#include <unistd.h>
++#endif
++
+ static const char *hello_string = "hi there\n";
+ #ifndef HELLO_BUF_SZ
+ #define HELLO_BUF_SZ 32
+@@ -365,6 +371,22 @@
+ }
+
+ PutEnvironment();
++
++ // A dup(2) wrapper that reports and exits the process on errors. The
++ // exiting logic is only suitable for this child process context.
++ const auto dupOrExit = [prog,name](const int oldFd) {
++ const auto newFd = dup(oldFd);
++ if (newFd < 0) {
++ const auto savedErrno = errno;
++ debugs(54, DBG_CRITICAL, "ERROR: Helper process initialization failure: " << name);
++ debugs(54, DBG_CRITICAL, "helper (CHILD) PID: " << getpid());
++ debugs(54, DBG_CRITICAL, "helper program name: " << prog);
++ debugs(54, DBG_CRITICAL, "dup(2) system call error for FD " << oldFd << ": " << xstrerr(savedErrno));
++ _exit(1);
++ }
++ return newFd;
++ };
++
+ /*
+ * This double-dup stuff avoids problems when one of
+ * crfd, cwfd, or debug_log are in the rage 0-2.
+@@ -372,17 +394,16 @@
+
+ do {
+ /* First make sure 0-2 is occupied by something. Gets cleaned up later */
+- x = dup(crfd);
+- assert(x > -1);
+- } while (x < 3 && x > -1);
++ x = dupOrExit(crfd);
++ } while (x < 3);
+
+ close(x);
+
+- t1 = dup(crfd);
++ t1 = dupOrExit(crfd);
+
+- t2 = dup(cwfd);
++ t2 = dupOrExit(cwfd);
+
+- t3 = dup(fileno(debug_log));
++ t3 = dupOrExit(fileno(debug_log));
+
+ assert(t1 > 2 && t2 > 2 && t3 > 2);
+
diff --git a/meta-networking/recipes-daemons/squid/files/CVE-2023-50269.patch b/meta-networking/recipes-daemons/squid/files/CVE-2023-50269.patch
new file mode 100644
index 0000000000..51c895e0ef
--- /dev/null
+++ b/meta-networking/recipes-daemons/squid/files/CVE-2023-50269.patch
@@ -0,0 +1,62 @@
+From: Markus Koschany <apo@debian.org>
+Date: Tue, 26 Dec 2023 19:58:12 +0100
+Subject: CVE-2023-50269
+
+Bug-Debian: https://bugs.debian.org/1058721
+Origin: http://www.squid-cache.org/Versions/v5/SQUID-2023_10.patch
+
+Upstream-Status: Backport [import from ubuntu https://git.launchpad.net/ubuntu/+source/squid/tree/debian/patches/CVE-2023-50269.patch?h=ubuntu/focal-security&id=9ccd217ca9428c9a6597e9310a99552026b245fa
+Upstream commit https://github.com/squid-cache/squid/commit/9f7136105bff920413042a8806cc5de3f6086d6d]
+CVE: CVE-2023-50269
+Signed-off-by: Vijay Anusuri <vanusuri@mvista.com>
+---
+ src/ClientRequestContext.h | 4 ++++
+ src/client_side_request.cc | 17 +++++++++++++++--
+ 2 files changed, 19 insertions(+), 2 deletions(-)
+
+--- a/src/ClientRequestContext.h
++++ b/src/ClientRequestContext.h
+@@ -81,6 +81,10 @@
+ #endif
+ ErrorState *error; ///< saved error page for centralized/delayed processing
+ bool readNextRequest; ///< whether Squid should read after error handling
++
++#if FOLLOW_X_FORWARDED_FOR
++ size_t currentXffHopNumber = 0; ///< number of X-Forwarded-For header values processed so far
++#endif
+ };
+
+ #endif /* SQUID_CLIENTREQUESTCONTEXT_H */
+--- a/src/client_side_request.cc
++++ b/src/client_side_request.cc
+@@ -78,6 +78,11 @@
+ static const char *const crlf = "\r\n";
+
+ #if FOLLOW_X_FORWARDED_FOR
++
++#if !defined(SQUID_X_FORWARDED_FOR_HOP_MAX)
++#define SQUID_X_FORWARDED_FOR_HOP_MAX 64
++#endif
++
+ static void clientFollowXForwardedForCheck(allow_t answer, void *data);
+ #endif /* FOLLOW_X_FORWARDED_FOR */
+
+@@ -485,8 +490,16 @@
+ /* override the default src_addr tested if we have to go deeper than one level into XFF */
+ Filled(calloutContext->acl_checklist)->src_addr = request->indirect_client_addr;
+ }
+- calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
+- return;
++ if (++calloutContext->currentXffHopNumber < SQUID_X_FORWARDED_FOR_HOP_MAX) {
++ calloutContext->acl_checklist->nonBlockingCheck(clientFollowXForwardedForCheck, data);
++ return;
++ }
++ const auto headerName = Http::HeaderLookupTable.lookup(Http::HdrType::X_FORWARDED_FOR).name;
++ debugs(28, DBG_CRITICAL, "ERROR: Ignoring trailing " << headerName << " addresses");
++ debugs(28, DBG_CRITICAL, "addresses allowed by follow_x_forwarded_for: " << calloutContext->currentXffHopNumber);
++ debugs(28, DBG_CRITICAL, "last/accepted address: " << request->indirect_client_addr);
++ debugs(28, DBG_CRITICAL, "ignored trailing addresses: " << request->x_forwarded_for_iterator);
++ // fall through to resume clientAccessCheck() processing
+ }
+ }
+
diff --git a/meta-networking/recipes-daemons/squid/squid_4.15.bb b/meta-networking/recipes-daemons/squid/squid_4.15.bb
index a1122a3cd4..69b62aa5a5 100644
--- a/meta-networking/recipes-daemons/squid/squid_4.15.bb
+++ b/meta-networking/recipes-daemons/squid/squid_4.15.bb
@@ -25,6 +25,13 @@ SRC_URI = "http://www.squid-cache.org/Versions/v${MAJ_VER}/${BPN}-${PV}.tar.bz2
file://0001-tools.cc-fixed-unused-result-warning.patch \
file://0001-splay.cc-fix-bind-is-not-a-member-of-std.patch \
file://0001-Fix-build-on-Fedora-Rawhide-772.patch \
+ file://CVE-2023-46847.patch \
+ file://CVE-2023-49285.patch \
+ file://CVE-2023-46728.patch \
+ file://CVE-2023-46846-pre1.patch \
+ file://CVE-2023-46846.patch \
+ file://CVE-2023-49286.patch \
+ file://CVE-2023-50269.patch \
"
SRC_URI:remove:toolchain-clang = "file://0001-configure-Check-for-Wno-error-format-truncation-comp.patch"