/*
 * Copyright (c) 2018-2020, Andreas Kling <andreas@ladybird.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <LibCore/File.h>
#include <LibCore/Notifier.h>
#include <LibRequests/Request.h>
#include <LibRequests/RequestClient.h>

namespace Requests {

Request::Request(RequestClient& client, i32 request_id)
    : m_client(client)
    , m_request_id(request_id)
{
}
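
// Tears down this request: clears all user-facing callbacks and internal state, then
// asks the owning RequestClient to stop the underlying request.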
bool Request::stop()
{
    on_headers_received = nullptr;
    on_finish = nullptr;
    on_certificate_requested = nullptr;

    m_internal_buffered_data = nullptr;
    m_internal_stream_data = nullptr;
    m_mode = Mode::Unknown;

    return m_client->stop_request({}, *this);
}
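
// Called by RequestClient once the data pipe for this request is available; adopts the
// file descriptor and rewires the existing read notifier and stream onto it.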
void Request::set_request_fd(Badge<Requests::RequestClient>, int fd)
{
    VERIFY(m_fd == -1);
    m_fd = fd;

    auto notifier = Core::Notifier::construct(fd, Core::Notifier::Type::Read);
    auto stream = MUST(Core::File::adopt_fd(fd, Core::File::OpenMode::Read));
    notifier->on_activation = move(m_internal_stream_data->read_notifier->on_activation);
    m_internal_stream_data->read_notifier = move(notifier);
    m_internal_stream_data->read_stream = move(stream);
}
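
// Buffered mode: accumulate the whole response body in memory and invoke the given
// callback once, with headers, status and full payload, when the request finishes.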
void Request::set_buffered_request_finished_callback(BufferedRequestFinished on_buffered_request_finished)
{
    VERIFY(m_mode == Mode::Unknown);
    m_mode = Mode::Buffered;

    m_internal_buffered_data = make<InternalBufferedData>();

    on_headers_received = [this](auto& headers, auto response_code, auto const& reason_phrase) {
        m_internal_buffered_data->response_headers = headers;
        m_internal_buffered_data->response_code = move(response_code);
        m_internal_buffered_data->reason_phrase = reason_phrase;
    };

    on_finish = [this, on_buffered_request_finished = move(on_buffered_request_finished)](auto total_size, auto& timing_info, auto network_error) {
        auto output_buffer = ByteBuffer::create_uninitialized(m_internal_buffered_data->payload_stream.used_buffer_size()).release_value_but_fixme_should_propagate_errors();
        m_internal_buffered_data->payload_stream.read_until_filled(output_buffer).release_value_but_fixme_should_propagate_errors();

        on_buffered_request_finished(
            total_size,
            timing_info,
            network_error,
            m_internal_buffered_data->response_headers,
            m_internal_buffered_data->response_code,
            m_internal_buffered_data->reason_phrase,
            output_buffer);
    };

    set_up_internal_stream_data([this](auto read_bytes) {
        // FIXME: What do we do if this fails?
        m_internal_buffered_data->payload_stream.write_until_depleted(read_bytes).release_value_but_fixme_should_propagate_errors();
    });
}
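
// Unbuffered (streaming) mode: forward headers, data chunks and completion directly to
// the caller's callbacks as they arrive.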
void Request::set_unbuffered_request_callbacks(HeadersReceived on_headers_received, DataReceived on_data_received, RequestFinished on_finish)
{
    VERIFY(m_mode == Mode::Unknown);
    m_mode = Mode::Unbuffered;

    this->on_headers_received = move(on_headers_received);
    this->on_finish = move(on_finish);

    set_up_internal_stream_data(move(on_data_received));
}
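
// Called by RequestClient when the request has completed, successfully or with a
// network error.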
void Request::did_finish(Badge<RequestClient>, u64 total_size, RequestTimingInfo const& timing_info, Optional<NetworkError> const& network_error)
{
    if (on_finish)
        on_finish(total_size, timing_info, network_error);
}
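
// Called by RequestClient when the response headers (and status code, if any) arrive.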
void Request::did_receive_headers(Badge<RequestClient>, HTTP::HeaderMap const& response_headers, Optional<u32> response_code, Optional<String> const& reason_phrase)
{
    if (on_headers_received)
        on_headers_received(response_headers, response_code, reason_phrase);
}
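
// Called by RequestClient when the server asks for a client certificate; the result of
// on_certificate_requested is handed back via RequestClient::set_certificate().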
void Request::did_request_certificates(Badge<RequestClient>)
{
    if (on_certificate_requested) {
        auto result = on_certificate_requested();
        if (!m_client->set_certificate({}, *this, result.certificate, result.key)) {
            dbgln("Request: set_certificate failed");
        }
    }
}
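
// Shared plumbing for both modes: wrap the request's read fd in a notifier and stream,
// drain available data whenever the notifier fires, and defer the user's on_finish
// until the server has reported completion and the stream has been read to EOF.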
void Request::set_up_internal_stream_data(DataReceived on_data_available)
{
    VERIFY(!m_internal_stream_data);

    m_internal_stream_data = make<InternalStreamData>();
    m_internal_stream_data->read_notifier = Core::Notifier::construct(fd(), Core::Notifier::Type::Read);
    if (fd() != -1)
        m_internal_stream_data->read_stream = MUST(Core::File::adopt_fd(fd(), Core::File::OpenMode::Read));

    // Wrap the caller's on_finish: server-side completion only records state here; the
    // user's callback runs once the stream has also been fully drained.
    auto user_on_finish = move(on_finish);
    on_finish = [this](auto total_size, auto const& timing_info, auto network_error) {
        m_internal_stream_data->total_size = total_size;
        m_internal_stream_data->network_error = network_error;
        m_internal_stream_data->timing_info = timing_info;
        m_internal_stream_data->request_done = true;
        m_internal_stream_data->on_finish();
    };

    m_internal_stream_data->on_finish = [this, user_on_finish = move(user_on_finish)]() {
        if (!m_internal_stream_data->user_finish_called && (!m_internal_stream_data->read_stream || m_internal_stream_data->read_stream->is_eof())) {
            m_internal_stream_data->user_finish_called = true;
            user_on_finish(m_internal_stream_data->total_size, m_internal_stream_data->timing_info, m_internal_stream_data->network_error);
        }
    };

    m_internal_stream_data->read_notifier->on_activation = [this, on_data_available = move(on_data_available)]() {
        static constexpr size_t buffer_size = 256 * KiB;
        static char buffer[buffer_size];

        do {
            auto result = m_internal_stream_data->read_stream->read_some({ buffer, buffer_size });
            // Reads interrupted by EINTR are retried; any other error ends the loop.
            if (result.is_error() && (!result.error().is_errno() || (result.error().is_errno() && result.error().code() != EINTR)))
                break;
            if (result.is_error())
                continue;

            auto read_bytes = result.release_value();
            if (read_bytes.is_empty())
                break;

            on_data_available(read_bytes);
        } while (true);

        if (m_internal_stream_data->read_stream->is_eof())
            m_internal_stream_data->read_notifier->close();

        if (m_internal_stream_data->request_done)
            m_internal_stream_data->on_finish();
    };
}

}