diff --git a/src/IO/HTTPChunkedReadBuffer.cpp b/src/IO/HTTPChunkedReadBuffer.cpp
index bd9bbba4c6c449cd762a81211662bbd30d074ccd..374e04031d07fbb7aaa1eecc5e1a3fe09e1748fa 100644
--- a/src/IO/HTTPChunkedReadBuffer.cpp
+++ b/src/IO/HTTPChunkedReadBuffer.cpp
@@ -14,7 +14,6 @@ namespace ErrorCodes
     extern const int ARGUMENT_OUT_OF_BOUND;
     extern const int UNEXPECTED_END_OF_FILE;
     extern const int CORRUPTED_DATA;
-    extern const int TOO_MANY_BYTES;
 }
 
 size_t HTTPChunkedReadBuffer::readChunkHeader()
@@ -40,9 +39,6 @@ size_t HTTPChunkedReadBuffer::readChunkHeader()
     if (in->eof())
         throw Exception("Unexpected end of file while reading chunk header of HTTP chunked data", ErrorCodes::UNEXPECTED_END_OF_FILE);
 
-    if (res > max_size)
-        throw Exception("Chunk size is too large", ErrorCodes::TOO_MANY_BYTES);
-
     assertString("\n", *in);
     return res;
 }
diff --git a/src/IO/HTTPChunkedReadBuffer.h b/src/IO/HTTPChunkedReadBuffer.h
index 0ccebc69d08e3135956a5741b336aef1c672c3bc..378835cafc0c5fe50c10f667fdc3c7bec8a1816c 100644
--- a/src/IO/HTTPChunkedReadBuffer.h
+++ b/src/IO/HTTPChunkedReadBuffer.h
@@ -10,11 +10,10 @@ namespace DB
 class HTTPChunkedReadBuffer : public BufferWithOwnMemory<ReadBuffer>
 {
 public:
-    HTTPChunkedReadBuffer(std::unique_ptr<ReadBuffer> in_, size_t max_chunk_size) : in(std::move(in_)), max_size(max_chunk_size) {}
+    explicit HTTPChunkedReadBuffer(std::unique_ptr<ReadBuffer> in_) : in(std::move(in_)) {}
 
 private:
     std::unique_ptr<ReadBuffer> in;
-    const size_t max_size;
 
     size_t readChunkHeader();
     void readChunkFooter();
diff --git a/src/Server/HTTP/HTTPServerRequest.cpp b/src/Server/HTTP/HTTPServerRequest.cpp
index bdba6a51d91a25a35f6877dc08a65cb5773a0235..2a765f36fd7b52b12f54803e7fac436d94685d71 100644
--- a/src/Server/HTTP/HTTPServerRequest.cpp
+++ b/src/Server/HTTP/HTTPServerRequest.cpp
@@ -26,7 +26,6 @@ HTTPServerRequest::HTTPServerRequest(const Context & context, HTTPServerResponse
 
     auto receive_timeout = context.getSettingsRef().http_receive_timeout;
     auto send_timeout = context.getSettingsRef().http_send_timeout;
-    auto max_query_size = context.getSettingsRef().max_query_size;
 
     session.socket().setReceiveTimeout(receive_timeout);
     session.socket().setSendTimeout(send_timeout);
@@ -37,7 +36,7 @@ HTTPServerRequest::HTTPServerRequest(const Context & context, HTTPServerResponse
     readRequest(*in);  /// Try parse according to RFC7230
 
     if (getChunkedTransferEncoding())
-        stream = std::make_unique<HTTPChunkedReadBuffer>(std::move(in), max_query_size);
+        stream = std::make_unique<HTTPChunkedReadBuffer>(std::move(in));
     else if (hasContentLength())
         stream = std::make_unique<LimitReadBuffer>(std::move(in), getContentLength(), false);
     else if (getMethod() != HTTPRequest::HTTP_GET && getMethod() != HTTPRequest::HTTP_HEAD && getMethod() != HTTPRequest::HTTP_DELETE)
diff --git a/tests/queries/0_stateless/01783_http_chunk_size.reference b/tests/queries/0_stateless/01783_http_chunk_size.reference
new file mode 100644
index 0000000000000000000000000000000000000000..e454a00607ce6a4e95f87e7bc71fcc832e71c578
--- /dev/null
+++ b/tests/queries/0_stateless/01783_http_chunk_size.reference
@@ -0,0 +1 @@
+1234567890 1234567890 1234567890 1234567890
diff --git a/tests/queries/0_stateless/01783_http_chunk_size.sh b/tests/queries/0_stateless/01783_http_chunk_size.sh
new file mode 100755
index 0000000000000000000000000000000000000000..66ac4dfa975fe6e1eb30f7a3a99ef924d0cee875
--- /dev/null
+++ b/tests/queries/0_stateless/01783_http_chunk_size.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+# shellcheck source=../shell_config.sh
+. "$CURDIR"/../shell_config.sh
+
+URL="${CLICKHOUSE_URL}&session_id=id_${CLICKHOUSE_DATABASE}"
+
+echo "DROP TABLE IF EXISTS table" | ${CLICKHOUSE_CURL} -sSg "${URL}" -d @-
+echo "CREATE TABLE table (a String) ENGINE Memory()" | ${CLICKHOUSE_CURL} -sSg "${URL}" -d @-
+
+# NOTE: suppose that curl sends everything in a single chunk - there are no options to force the chunk-size.
+echo "SET max_query_size=44" | ${CLICKHOUSE_CURL} -sSg "${URL}" -d @-
+echo -ne "INSERT INTO TABLE table FORMAT TabSeparated 1234567890 1234567890 1234567890 1234567890\n" | ${CLICKHOUSE_CURL} -H "Transfer-Encoding: chunked" -sS "${URL}" --data-binary @-
+
+echo "SELECT * from table" | ${CLICKHOUSE_CURL} -sSg "${URL}" -d @-
+echo "DROP TABLE table" | ${CLICKHOUSE_CURL} -sSg "${URL}" -d @-
diff --git a/tests/queries/skip_list.json b/tests/queries/skip_list.json
index 1064ddd8b4f152f0d28e99cd4f955417d250cdd3..4759fb956027857bc0547ec47b7156015009500f 100644
--- a/tests/queries/skip_list.json
+++ b/tests/queries/skip_list.json
@@ -164,7 +164,8 @@
         "00062_replicated_merge_tree_alter_zookeeper",
         /// Does not support renaming of multiple tables in single query
         "00634_rename_view",
-        "00140_rename"
+        "00140_rename",
+        "01783_http_chunk_size"
     ],
     "polymorphic-parts": [
         "01508_partition_pruning_long", /// bug, shoud be fixed