Everywhere: Hoist the Libraries folder to the top-level

This commit is contained in:
Timothy Flynn 2024-11-09 12:25:08 -05:00 committed by Andreas Kling
parent 950e819ee7
commit 93712b24bf
Notes: github-actions[bot] 2024-11-10 11:51:52 +00:00
4547 changed files with 104 additions and 113 deletions

View file

@ -0,0 +1,349 @@
/*
* Copyright (c) 2024, Jelle Raaijmakers <jelle@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "FFmpegLoader.h"
#include <AK/BitStream.h>
#include <AK/NumericLimits.h>
#include <LibCore/System.h>
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
# define USE_FFMPEG_CH_LAYOUT
#endif
#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(59, 0, 100)
# define USE_CONSTIFIED_POINTERS
#endif
namespace Audio {
static constexpr int BUFFER_MAX_PROBE_SIZE = 64 * KiB;
// Wraps an FFmpeg AVIO context; assumes ownership of it (released in the destructor).
FFmpegIOContext::FFmpegIOContext(AVIOContext* avio_context)
    : m_avio_context(avio_context)
{
}
// Releases the AVIO buffer and the context itself.
FFmpegIOContext::~FFmpegIOContext()
{
    // NOTE: free the buffer inside the AVIO context, since it might be changed since its initial allocation
    av_free(m_avio_context->buffer);
    avio_context_free(&m_avio_context);
}
// Adapts an AK::SeekableStream into an AVIOContext by installing read and seek
// callbacks that forward to the stream. The buffer's ownership passes to the AVIO
// context (which may reallocate it), hence the destructor frees the context's
// buffer pointer rather than the one allocated here.
ErrorOr<NonnullOwnPtr<FFmpegIOContext>, LoaderError> FFmpegIOContext::create(AK::SeekableStream& stream)
{
    auto* avio_buffer = av_malloc(PAGE_SIZE);
    if (avio_buffer == nullptr)
        return LoaderError { LoaderError::Category::IO, "Failed to allocate AVIO buffer" };

    // This AVIOContext explains to avformat how to interact with our stream
    auto* avio_context = avio_alloc_context(
        static_cast<unsigned char*>(avio_buffer),
        PAGE_SIZE,
        0,
        &stream,
        [](void* opaque, u8* buffer, int size) -> int {
            auto& stream = *static_cast<SeekableStream*>(opaque);
            AK::Bytes buffer_bytes { buffer, AK::min<size_t>(size, PAGE_SIZE) };
            auto read_bytes_or_error = stream.read_some(buffer_bytes);
            if (read_bytes_or_error.is_error()) {
                if (read_bytes_or_error.error().code() == EOF)
                    return AVERROR_EOF;
                return AVERROR_UNKNOWN;
            }
            int number_of_bytes_read = read_bytes_or_error.value().size();
            // FFmpeg expects AVERROR_EOF rather than a zero-byte read at end of stream.
            if (number_of_bytes_read == 0)
                return AVERROR_EOF;
            return number_of_bytes_read;
        },
        nullptr,
        [](void* opaque, int64_t offset, int whence) -> int64_t {
            whence &= ~AVSEEK_FORCE;
            auto& stream = *static_cast<SeekableStream*>(opaque);
            if (whence == AVSEEK_SIZE)
                return static_cast<int64_t>(stream.size().value());
            auto seek_mode_from_whence = [](int origin) -> SeekMode {
                if (origin == SEEK_CUR)
                    return SeekMode::FromCurrentPosition;
                if (origin == SEEK_END)
                    return SeekMode::FromEndPosition;
                return SeekMode::SetPosition;
            };
            auto offset_or_error = stream.seek(offset, seek_mode_from_whence(whence));
            if (offset_or_error.is_error())
                return -EIO;
            // The AVIO seek callback must return the resulting stream position (FFmpeg
            // uses this value to update its internal offset), not 0.
            return static_cast<int64_t>(offset_or_error.value());
        });
    if (avio_context == nullptr) {
        av_free(avio_buffer);
        return LoaderError { LoaderError::Category::IO, "Failed to allocate AVIO context" };
    }
    return make<FFmpegIOContext>(avio_context);
}
// Takes ownership of the source stream and the AVIO context that reads from it.
FFmpegLoaderPlugin::FFmpegLoaderPlugin(NonnullOwnPtr<SeekableStream> stream, NonnullOwnPtr<FFmpegIOContext> io_context)
    : LoaderPlugin(move(stream))
    , m_io_context(move(io_context))
{
}
// Frees decoding state in reverse order of acquisition in initialize(); each member
// may still be null if initialization failed partway through.
FFmpegLoaderPlugin::~FFmpegLoaderPlugin()
{
    if (m_frame != nullptr)
        av_frame_free(&m_frame);
    if (m_packet != nullptr)
        av_packet_free(&m_packet);
    if (m_codec_context != nullptr)
        avcodec_free_context(&m_codec_context);
    if (m_format_context != nullptr)
        avformat_close_input(&m_format_context);
}
// Creates a plugin for the given stream and eagerly parses the container and opens a
// decoder, so format errors surface here instead of on the first read.
ErrorOr<NonnullOwnPtr<LoaderPlugin>, LoaderError> FFmpegLoaderPlugin::create(NonnullOwnPtr<SeekableStream> stream)
{
    auto io_context = TRY(FFmpegIOContext::create(*stream));
    auto loader = make<FFmpegLoaderPlugin>(move(stream), move(io_context));
    TRY(loader->initialize());
    return loader;
}
// Opens the container, selects the best audio stream, and sets up a decoder for it.
// On success m_format_context, m_codec_context, m_packet and m_frame are all valid;
// on failure the destructor frees whatever was allocated up to that point.
MaybeLoaderError FFmpegLoaderPlugin::initialize()
{
    // Open the container
    m_format_context = avformat_alloc_context();
    if (m_format_context == nullptr)
        return LoaderError { LoaderError::Category::IO, "Failed to allocate format context" };
    m_format_context->pb = m_io_context->avio_context();
    if (avformat_open_input(&m_format_context, nullptr, nullptr, nullptr) < 0)
        return LoaderError { LoaderError::Category::IO, "Failed to open input for format parsing" };

    // Read stream info; doing this is required for headerless formats like MPEG
    if (avformat_find_stream_info(m_format_context, nullptr) < 0)
        return LoaderError { LoaderError::Category::IO, "Failed to find stream info" };

#ifdef USE_CONSTIFIED_POINTERS
    AVCodec const* codec {};
#else
    AVCodec* codec {};
#endif

    // Find the best stream to play within the container
    int best_stream_index = av_find_best_stream(m_format_context, AVMediaType::AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
    if (best_stream_index == AVERROR_STREAM_NOT_FOUND)
        return LoaderError { LoaderError::Category::Format, "No audio stream found in container" };
    if (best_stream_index == AVERROR_DECODER_NOT_FOUND)
        return LoaderError { LoaderError::Category::Format, "No suitable decoder found for stream" };
    if (best_stream_index < 0)
        return LoaderError { LoaderError::Category::Format, "Failed to find an audio stream" };
    m_audio_stream = m_format_context->streams[best_stream_index];

    // Set up the context to decode the audio stream
    m_codec_context = avcodec_alloc_context3(codec);
    if (m_codec_context == nullptr)
        return LoaderError { LoaderError::Category::IO, "Failed to allocate the codec context" };
    if (avcodec_parameters_to_context(m_codec_context, m_audio_stream->codecpar) < 0)
        return LoaderError { LoaderError::Category::IO, "Failed to copy codec parameters" };
    // Let the decoder interpret packet timestamps in the stream's own time base.
    m_codec_context->pkt_timebase = m_audio_stream->time_base;
    // Cap decoding threads at 4 — presumably to bound resource usage; confirm rationale.
    m_codec_context->thread_count = AK::min(static_cast<int>(Core::System::hardware_concurrency()), 4);
    if (avcodec_open2(m_codec_context, codec, nullptr) < 0)
        return LoaderError { LoaderError::Category::IO, "Failed to open input for decoding" };

    // This is an initial estimate of the total number of samples in the stream.
    // During decoding, we might need to increase the number as more frames come in.
    double duration_in_seconds = static_cast<double>(m_audio_stream->duration) * time_base();
    if (duration_in_seconds < 0)
        return LoaderError { LoaderError::Category::Format, "Negative stream duration" };
    m_total_samples = AK::round_to<decltype(m_total_samples)>(sample_rate() * duration_in_seconds);

    // Allocate packet (logical chunk of data) and frame (video / audio frame) buffers
    m_packet = av_packet_alloc();
    if (m_packet == nullptr)
        return LoaderError { LoaderError::Category::IO, "Failed to allocate packet" };
    m_frame = av_frame_alloc();
    if (m_frame == nullptr)
        return LoaderError { LoaderError::Category::IO, "Failed to allocate frame" };
    return {};
}
// Returns the duration, in seconds, of a single tick of the audio stream's time base.
double FFmpegLoaderPlugin::time_base() const
{
    auto const& stream_time_base = m_audio_stream->time_base;
    return av_q2d(stream_time_base);
}
// Returns true if FFmpeg can identify the stream's container format by probing up to
// BUFFER_MAX_PROBE_SIZE bytes; used by Loader to choose a plugin.
bool FFmpegLoaderPlugin::sniff(SeekableStream& stream)
{
    // NOTE(review): MUST aborts on AVIO context allocation failure — presumably
    //               acceptable since that only fails under memory exhaustion; confirm.
    auto io_context = MUST(FFmpegIOContext::create(stream));
#ifdef USE_CONSTIFIED_POINTERS
    AVInputFormat const* detected_format {};
#else
    AVInputFormat* detected_format {};
#endif
    // A strictly positive score means some input format matched the probed data.
    auto score = av_probe_input_buffer2(io_context->avio_context(), &detected_format, nullptr, nullptr, 0, BUFFER_MAX_PROBE_SIZE);
    return score > 0;
}
// Converts one decoded AVFrame into mono or stereo Sample values.
// Supports planar float (FLTP) plus signed 16/32-bit integer formats; integer
// samples are scaled into float range by dividing by the type's maximum value.
static ErrorOr<FixedArray<Sample>> extract_samples_from_frame(AVFrame& frame)
{
    size_t number_of_samples = frame.nb_samples;
    VERIFY(number_of_samples > 0);
#ifdef USE_FFMPEG_CH_LAYOUT
    size_t number_of_channels = frame.ch_layout.nb_channels;
#else
    size_t number_of_channels = frame.channels;
#endif
    auto format = static_cast<AVSampleFormat>(frame.format);
    auto packed_format = av_get_packed_sample_fmt(format);
    auto is_planar = av_sample_fmt_is_planar(format) == 1;

    // FIXME: handle number_of_channels > 2
    if (number_of_channels != 1 && number_of_channels != 2)
        return Error::from_string_view("Unsupported number of channels"sv);

    switch (format) {
    case AV_SAMPLE_FMT_FLTP:
    case AV_SAMPLE_FMT_S16:
    case AV_SAMPLE_FMT_S32:
        break;
    default:
        // FIXME: handle other formats
        return Error::from_string_view("Unsupported sample format"sv);
    }

    // Planar audio keeps one plane per channel; packed audio interleaves every
    // channel in plane 0.
    auto get_plane_pointer = [&](size_t channel_index) -> uint8_t* {
        return is_planar ? frame.extended_data[channel_index] : frame.extended_data[0];
    };
    auto index_in_plane = [&](size_t sample_index, size_t channel_index) {
        if (is_planar)
            return sample_index;
        return sample_index * number_of_channels + channel_index;
    };
    // Reads one raw sample at the given index and converts it to float.
    auto read_sample = [&](uint8_t* data, size_t index) -> float {
        switch (packed_format) {
        case AV_SAMPLE_FMT_FLT:
            return reinterpret_cast<float*>(data)[index];
        case AV_SAMPLE_FMT_S16:
            return reinterpret_cast<i16*>(data)[index] / static_cast<float>(NumericLimits<i16>::max());
        case AV_SAMPLE_FMT_S32:
            return reinterpret_cast<i32*>(data)[index] / static_cast<float>(NumericLimits<i32>::max());
        default:
            // Unreachable: the format switch above admits only the three cases handled here.
            VERIFY_NOT_REACHED();
        }
    };

    auto samples = TRY(FixedArray<Sample>::create(number_of_samples));
    for (size_t sample = 0; sample < number_of_samples; ++sample) {
        if (number_of_channels == 1) {
            samples.unchecked_at(sample) = Sample { read_sample(get_plane_pointer(0), index_in_plane(sample, 0)) };
        } else {
            samples.unchecked_at(sample) = Sample {
                read_sample(get_plane_pointer(0), index_in_plane(sample, 0)),
                read_sample(get_plane_pointer(1), index_in_plane(sample, 1)),
            };
        }
    }
    return samples;
}
// Decodes frames until at least samples_to_read_from_input samples have been produced
// or end of stream is reached. Each decoded frame becomes one chunk, so the requested
// amount may be overshot by up to one frame's worth of samples.
ErrorOr<Vector<FixedArray<Sample>>, LoaderError> FFmpegLoaderPlugin::load_chunks(size_t samples_to_read_from_input)
{
    Vector<FixedArray<Sample>> chunks {};
    do {
        // Obtain a packet
        auto read_frame_error = av_read_frame(m_format_context, m_packet);
        if (read_frame_error < 0) {
            if (read_frame_error == AVERROR_EOF)
                break;
            return LoaderError { LoaderError::Category::IO, "Failed to read frame" };
        }
        // Ignore packets belonging to streams other than the selected audio stream.
        if (m_packet->stream_index != m_audio_stream->index) {
            av_packet_unref(m_packet);
            continue;
        }

        // Send the packet to the decoder
        if (avcodec_send_packet(m_codec_context, m_packet) < 0)
            return LoaderError { LoaderError::Category::IO, "Failed to send packet" };
        av_packet_unref(m_packet);

        // Ask the decoder for a new frame. We might not have sent enough data yet
        auto receive_frame_error = avcodec_receive_frame(m_codec_context, m_frame);
        if (receive_frame_error != 0) {
            if (receive_frame_error == AVERROR(EAGAIN))
                continue;
            if (receive_frame_error == AVERROR_EOF)
                break;
            return LoaderError { LoaderError::Category::IO, "Failed to receive frame" };
        }

        chunks.append(TRY(extract_samples_from_frame(*m_frame)));

        // Use the frame's presentation timestamp to set the number of loaded samples
        m_loaded_samples = static_cast<int>(m_frame->pts * sample_rate() * time_base());
        // Decoding may run past the initial duration estimate; grow the total to match.
        if (m_loaded_samples > m_total_samples) [[unlikely]]
            m_total_samples = m_loaded_samples;

        samples_to_read_from_input -= AK::min(samples_to_read_from_input, m_frame->nb_samples);
    } while (samples_to_read_from_input > 0);
    return chunks;
}
// Restarts decoding from the beginning of the stream.
MaybeLoaderError FFmpegLoaderPlugin::reset()
{
    return seek(0);
}
// Seeks the demuxer to the frame containing the given sample index and flushes any
// buffered decoder state.
MaybeLoaderError FFmpegLoaderPlugin::seek(int sample_index)
{
    auto sample_position_in_seconds = static_cast<double>(sample_index) / sample_rate();
    // Convert from seconds into the stream's time base units.
    auto sample_timestamp = AK::round_to<int64_t>(sample_position_in_seconds / time_base());
    // NOTE(review): AVSEEK_FLAG_ANY allows landing on non-keyframes — presumably fine
    //               for the audio codecs in use here; confirm for inter-frame formats.
    if (av_seek_frame(m_format_context, m_audio_stream->index, sample_timestamp, AVSEEK_FLAG_ANY) < 0)
        return LoaderError { LoaderError::Category::IO, "Failed to seek" };
    avcodec_flush_buffers(m_codec_context);
    m_loaded_samples = sample_index;
    return {};
}
// Per-channel sample rate as reported by the opened codec context.
u32 FFmpegLoaderPlugin::sample_rate()
{
    VERIFY(m_codec_context != nullptr);
    return m_codec_context->sample_rate;
}
// Number of audio channels; newer FFmpeg versions expose this via the channel-layout API.
u16 FFmpegLoaderPlugin::num_channels()
{
    VERIFY(m_codec_context != nullptr);
#ifdef USE_FFMPEG_CH_LAYOUT
    return m_codec_context->ch_layout.nb_channels;
#else
    return m_codec_context->channels;
#endif
}
// Sample format exposed to consumers; extract_samples_from_frame() always produces floats.
PcmSampleFormat FFmpegLoaderPlugin::pcm_format()
{
    // FIXME: pcm_format() is unused, always return Float for now
    return PcmSampleFormat::Float32;
}
// Short name of the demuxed container format, or "unknown" before a format context exists.
ByteString FFmpegLoaderPlugin::format_name()
{
    if (m_format_context == nullptr)
        return "unknown";
    return ByteString { m_format_context->iformat->name };
}
}

View file

@ -0,0 +1,68 @@
/*
* Copyright (c) 2024, Jelle Raaijmakers <jelle@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "Loader.h"
#include <AK/Error.h>
#include <AK/NonnullOwnPtr.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/samplefmt.h>
}
namespace Audio {
// RAII wrapper around an FFmpeg AVIOContext that adapts an AK::SeekableStream into
// the read/seek callbacks avformat expects.
class FFmpegIOContext {
public:
    explicit FFmpegIOContext(AVIOContext*);
    ~FFmpegIOContext();

    static ErrorOr<NonnullOwnPtr<FFmpegIOContext>, LoaderError> create(AK::SeekableStream& stream);

    // Non-owning accessor; the context remains owned by this object.
    AVIOContext* avio_context() const { return m_avio_context; }

private:
    AVIOContext* m_avio_context { nullptr };
};
// Audio loader plugin decoding any container/codec supported by the linked FFmpeg
// libraries (libavformat / libavcodec).
class FFmpegLoaderPlugin : public LoaderPlugin {
public:
    explicit FFmpegLoaderPlugin(NonnullOwnPtr<SeekableStream>, NonnullOwnPtr<FFmpegIOContext>);
    virtual ~FFmpegLoaderPlugin();

    // Returns true if FFmpeg recognizes the stream's container format.
    static bool sniff(SeekableStream& stream);
    static ErrorOr<NonnullOwnPtr<LoaderPlugin>, LoaderError> create(NonnullOwnPtr<SeekableStream>);

    virtual ErrorOr<Vector<FixedArray<Sample>>, LoaderError> load_chunks(size_t samples_to_read_from_input) override;

    virtual MaybeLoaderError reset() override;
    virtual MaybeLoaderError seek(int sample_index) override;

    virtual int loaded_samples() override { return m_loaded_samples; }
    virtual int total_samples() override { return m_total_samples; }
    virtual u32 sample_rate() override;
    virtual u16 num_channels() override;
    virtual PcmSampleFormat pcm_format() override;
    virtual ByteString format_name() override;

private:
    // Opens the container and sets up the decoder; called once from create().
    MaybeLoaderError initialize();
    // Seconds per tick of the audio stream's time base.
    double time_base() const;

    AVStream* m_audio_stream;
    AVCodecContext* m_codec_context { nullptr };
    AVFormatContext* m_format_context { nullptr };
    AVFrame* m_frame { nullptr };
    NonnullOwnPtr<FFmpegIOContext> m_io_context;
    int m_loaded_samples { 0 };
    AVPacket* m_packet { nullptr };
    int m_total_samples { 0 };
};
}

View file

@ -0,0 +1,16 @@
/*
* Copyright (c) 2023, Tim Flynn <trflynn89@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
// Forward declarations for LibAudio types, so other headers can refer to them
// without pulling in their full definitions.
namespace Audio {

class ConnectionToServer;
class Loader;
class PlaybackStream;

struct Sample;

}

View file

@ -0,0 +1,111 @@
/*
* Copyright (c) 2018-2023, the SerenityOS developers.
* Copyright (c) 2024, Jelle Raaijmakers <jelle@ladybird.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "Loader.h"
#include "FFmpegLoader.h"
#include <AK/TypedTransfer.h>
#include <LibCore/MappedFile.h>
namespace Audio {
// Takes ownership of the input stream shared by all loader plugin implementations.
LoaderPlugin::LoaderPlugin(NonnullOwnPtr<SeekableStream> stream)
    : m_stream(move(stream))
{
}
// Wraps the plugin chosen by create_plugin(); construction happens via the static
// create() factories.
Loader::Loader(NonnullOwnPtr<LoaderPlugin> plugin)
    : m_plugin(move(plugin))
{
}
// Pairs a sniffing predicate with a factory so Loader can probe each plugin type
// before constructing it.
struct LoaderPluginInitializer {
    bool (*sniff)(SeekableStream&);
    ErrorOr<NonnullOwnPtr<LoaderPlugin>, LoaderError> (*create)(NonnullOwnPtr<SeekableStream>);
};

// All known loader plugins, tried in order by Loader::create_plugin().
static constexpr LoaderPluginInitializer s_initializers[] = {
    { FFmpegLoaderPlugin::sniff, FFmpegLoaderPlugin::create },
};
// Creates a loader for the file at the given path, memory-mapping it read-only.
ErrorOr<NonnullRefPtr<Loader>, LoaderError> Loader::create(StringView path)
{
    auto stream = TRY(Core::MappedFile::map(path, Core::MappedFile::Mode::ReadOnly));
    auto plugin = TRY(Loader::create_plugin(move(stream)));
    // NOTE: `adopt_ref(*new (nothrow) ...)` would dereference a null pointer on
    //       allocation failure; propagate ENOMEM instead.
    return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) Loader(move(plugin))));
}
// Creates a loader over an in-memory buffer; the buffer must outlive the loader.
ErrorOr<NonnullRefPtr<Loader>, LoaderError> Loader::create(ReadonlyBytes buffer)
{
    auto stream = TRY(try_make<FixedMemoryStream>(buffer));
    auto plugin = TRY(Loader::create_plugin(move(stream)));
    // NOTE: `adopt_ref(*new (nothrow) ...)` would dereference a null pointer on
    //       allocation failure; propagate ENOMEM instead.
    return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) Loader(move(plugin))));
}
// Probes the stream against each registered plugin; the stream is rewound after every
// probe so the matching plugin (or the next candidate) reads from offset 0.
ErrorOr<NonnullOwnPtr<LoaderPlugin>, LoaderError> Loader::create_plugin(NonnullOwnPtr<SeekableStream> stream)
{
    for (auto const& initializer : s_initializers) {
        bool recognized = initializer.sniff(*stream);
        TRY(stream->seek(0, SeekMode::SetPosition));
        if (recognized)
            return initializer.create(move(stream));
    }
    return LoaderError { "No loader plugin available" };
}
// Returns up to samples_to_read_from_input samples, draining the internal overflow
// buffer first and then pulling fresh chunks from the plugin. Surplus samples from
// the last chunk are stashed in m_buffer for the next call; an empty array is
// returned once the plugin hit end-of-stream and the buffer is exhausted.
LoaderSamples Loader::get_more_samples(size_t samples_to_read_from_input)
{
    if (m_plugin_at_end_of_stream && m_buffer.is_empty())
        return FixedArray<Sample> {};

    // Never request more than what remains according to the plugin's sample counts.
    size_t remaining_samples = total_samples() - loaded_samples();
    size_t samples_to_read = min(remaining_samples, samples_to_read_from_input);
    auto samples = TRY(FixedArray<Sample>::create(samples_to_read));

    size_t sample_index = 0;
    // Serve buffered samples left over from a previous call first.
    if (m_buffer.size() > 0) {
        size_t to_transfer = min(m_buffer.size(), samples_to_read);
        AK::TypedTransfer<Sample>::move(samples.data(), m_buffer.data(), to_transfer);
        if (to_transfer < m_buffer.size())
            m_buffer.remove(0, to_transfer);
        else
            m_buffer.clear_with_capacity();
        sample_index += to_transfer;
    }

    while (sample_index < samples_to_read) {
        auto chunk_data = TRY(m_plugin->load_chunks(samples_to_read - sample_index));
        chunk_data.remove_all_matching([](auto& chunk) { return chunk.is_empty(); });
        // No (non-empty) chunks at all signals the end of the stream.
        if (chunk_data.is_empty()) {
            m_plugin_at_end_of_stream = true;
            break;
        }
        for (auto& chunk : chunk_data) {
            if (sample_index < samples_to_read) {
                auto count = min(samples_to_read - sample_index, chunk.size());
                AK::TypedTransfer<Sample>::move(samples.span().offset(sample_index), chunk.data(), count);

                // We didn't read all of the chunk; transfer the rest into the buffer.
                if (count < chunk.size()) {
                    auto remaining_samples_count = chunk.size() - count;
                    // We will always have an empty buffer at this point!
                    TRY(m_buffer.try_append(chunk.span().offset(count), remaining_samples_count));
                }
            } else {
                // We're now past what the user requested. Transfer the entirety of the data into the buffer.
                TRY(m_buffer.try_append(chunk.data(), chunk.size()));
            }
            sample_index += chunk.size();
        }
    }

    return samples;
}
}

View file

@ -0,0 +1,115 @@
/*
* Copyright (c) 2018-2022, the SerenityOS developers.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "LoaderError.h"
#include "Sample.h"
#include "SampleFormats.h"
#include <AK/Error.h>
#include <AK/FixedArray.h>
#include <AK/NonnullOwnPtr.h>
#include <AK/NonnullRefPtr.h>
#include <AK/RefCounted.h>
#include <AK/Stream.h>
#include <AK/StringView.h>
#include <AK/Vector.h>
namespace Audio {
// Experimentally determined to be a decent buffer size on i686:
// 4K (the default) is slightly worse, and 64K is much worse.
// At sufficiently large buffer sizes, the advantage of infrequent read() calls is outweighed by the memmove() overhead.
// There was no intensive fine-tuning done to determine this value, so improvements may definitely be possible.
constexpr size_t const loader_buffer_size = 8 * KiB;
// Two seek points should ideally not be farther apart than this.
// This variable is a heuristic for seek table-constructing loaders.
constexpr u64 const maximum_seekpoint_distance_ms = 1000;
// Seeking should be at least as precise as this.
// That means: The actual achieved seek position must not be more than this amount of time before the requested seek position.
constexpr u64 const seek_tolerance_ms = 5000;
using LoaderSamples = ErrorOr<FixedArray<Sample>, LoaderError>;
using MaybeLoaderError = ErrorOr<void, LoaderError>;
// Abstract base for format-specific audio decoders. A plugin owns its input stream
// and exposes chunked decoding, seeking, and stream metadata.
class LoaderPlugin {
public:
    explicit LoaderPlugin(NonnullOwnPtr<SeekableStream> stream);
    virtual ~LoaderPlugin() = default;

    // Load as many audio chunks as necessary to get up to the required samples.
    // A chunk can be anything that is convenient for the plugin to load in one go without requiring to move samples around different buffers.
    // For example: A FLAC, MP3 or QOA frame.
    // The chunks are returned in a vector, so the loader can simply add chunks until the requested sample amount is reached.
    // The sample count MAY be surpassed, but only as little as possible. It CAN be undershot when the end of the stream is reached.
    // If the loader has no chunking limitations (e.g. WAV), it may return a single exact-sized chunk.
    virtual ErrorOr<Vector<FixedArray<Sample>>, LoaderError> load_chunks(size_t samples_to_read_from_input) = 0;

    // Rewinds decoding back to the start of the stream.
    virtual MaybeLoaderError reset() = 0;

    // Seeks so that the next loaded chunk starts at (or near) the given sample index.
    virtual MaybeLoaderError seek(int const sample_index) = 0;

    // total_samples() and loaded_samples() should be independent
    // of the number of channels.
    //
    // For example, with a three-second-long, stereo, 44.1KHz audio file:
    // num_channels() should return 2
    // sample_rate() should return 44100 (each channel is sampled at this rate)
    // total_samples() should return 132300 (sample_rate * three seconds)
    virtual int loaded_samples() = 0;
    virtual int total_samples() = 0;
    virtual u32 sample_rate() = 0;
    virtual u16 num_channels() = 0;

    // Human-readable name of the file format, of the form <full abbreviation> (.<ending>)
    virtual ByteString format_name() = 0;
    virtual PcmSampleFormat pcm_format() = 0;

protected:
    NonnullOwnPtr<SeekableStream> m_stream;
};
// Front-end for audio decoding: picks a suitable plugin for a file or buffer and
// buffers decoded samples so callers can read arbitrary amounts at a time.
class Loader : public RefCounted<Loader> {
public:
    static ErrorOr<NonnullRefPtr<Loader>, LoaderError> create(StringView path);
    static ErrorOr<NonnullRefPtr<Loader>, LoaderError> create(ReadonlyBytes buffer);

    // Will only read less samples if we're at the end of the stream.
    LoaderSamples get_more_samples(size_t samples_to_read_from_input = 128 * KiB);

    // Rewinds to the beginning of the stream and clears the end-of-stream flag.
    MaybeLoaderError reset() const
    {
        m_plugin_at_end_of_stream = false;
        return m_plugin->reset();
    }
    // Seeks to the given sample index, discarding any already-buffered samples.
    MaybeLoaderError seek(int const position) const
    {
        m_buffer.clear_with_capacity();
        m_plugin_at_end_of_stream = false;
        return m_plugin->seek(position);
    }

    // Samples already handed to the caller; buffered-but-unread samples don't count.
    int loaded_samples() const { return m_plugin->loaded_samples() - (int)m_buffer.size(); }
    int total_samples() const { return m_plugin->total_samples(); }
    u32 sample_rate() const { return m_plugin->sample_rate(); }
    u16 num_channels() const { return m_plugin->num_channels(); }
    ByteString format_name() const { return m_plugin->format_name(); }
    u16 bits_per_sample() const { return pcm_bits_per_sample(m_plugin->pcm_format()); }
    PcmSampleFormat pcm_format() const { return m_plugin->pcm_format(); }

private:
    static ErrorOr<NonnullOwnPtr<LoaderPlugin>, LoaderError> create_plugin(NonnullOwnPtr<SeekableStream> stream);

    explicit Loader(NonnullOwnPtr<LoaderPlugin>);

    mutable NonnullOwnPtr<LoaderPlugin> m_plugin;
    // The plugin can signal an end of stream by returning no (or only empty) chunks.
    mutable bool m_plugin_at_end_of_stream { false };
    // Overflow storage for samples decoded beyond what the caller requested.
    mutable Vector<Sample, loader_buffer_size> m_buffer;
};
}

View file

@ -0,0 +1,96 @@
/*
* Copyright (c) 2021, kleines Filmröllchen <filmroellchen@serenityos.org>.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/DeprecatedFlyString.h>
#include <AK/Error.h>
#include <errno.h>
namespace Audio {
// An error that occurred during audio loading: a coarse category, an optional byte
// offset into the source, and a human-readable description.
struct LoaderError {

    enum class Category : u32 {
        // The error category is unknown.
        Unknown = 0,
        IO,
        // The read file doesn't follow the file format.
        Format,
        // Equivalent to an ASSERT(), except non-crashing.
        Internal,
        // The loader encountered something in the format that is not yet implemented.
        Unimplemented,
    };
    Category category { Category::Unknown };
    // Binary index: where in the file the error occurred.
    size_t index { 0 };
    DeprecatedFlyString description { ByteString::empty() };

    constexpr LoaderError() = default;
    LoaderError(Category category, size_t index, DeprecatedFlyString description)
        : category(category)
        , index(index)
        , description(move(description))
    {
    }
    LoaderError(DeprecatedFlyString description)
        : description(move(description))
    {
    }
    LoaderError(Category category, DeprecatedFlyString description)
        : category(category)
        , description(move(description))
    {
    }

    // Take the copy constructor by const reference so const errors can be copied too,
    // and explicitly default the assignment operators (the user-declared move
    // constructor would otherwise suppress their implicit generation).
    LoaderError(LoaderError const&) = default;
    LoaderError(LoaderError&&) = default;
    LoaderError& operator=(LoaderError const&) = default;
    LoaderError& operator=(LoaderError&&) = default;

    // Adapts a plain AK::Error: errno-based errors are formatted as "message (code)"
    // and common I/O errno values are classified as Category::IO.
    LoaderError(Error&& error)
    {
        if (error.is_errno()) {
            auto code = error.code();
            description = ByteString::formatted("{} ({})", strerror(code), code);
            if (code == EBADF || code == EBUSY || code == EEXIST || code == EIO || code == EISDIR || code == ENOENT || code == ENOMEM || code == EPIPE)
                category = Category::IO;
        } else {
            description = error.string_literal();
        }
    }
};
}
namespace AK {
// Allows LoaderError to be used directly in AK format strings, e.g. dbgln("{}", error).
template<>
struct Formatter<Audio::LoaderError> : Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder& builder, Audio::LoaderError const& error)
    {
        // Map the error category to a short human-readable tag.
        StringView category;
        switch (error.category) {
        case Audio::LoaderError::Category::Unknown:
            category = "Unknown"sv;
            break;
        case Audio::LoaderError::Category::IO:
            category = "I/O"sv;
            break;
        case Audio::LoaderError::Category::Format:
            category = "Format"sv;
            break;
        case Audio::LoaderError::Category::Internal:
            category = "Internal"sv;
            break;
        case Audio::LoaderError::Category::Unimplemented:
            category = "Unimplemented"sv;
            break;
        }
        return Formatter<FormatString>::format(builder, "{} error: {} (at {})"sv, category, error.description, error.index);
    }
};
}

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2023, Gregory Bertilson <zaggy1024@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "PlaybackStream.h"
#include <AK/Platform.h>
#include <LibCore/ThreadedPromise.h>
#if defined(HAVE_PULSEAUDIO)
# include "PlaybackStreamPulseAudio.h"
#elif defined(AK_OS_MACOS)
# include "PlaybackStreamAudioUnit.h"
#elif defined(AK_OS_ANDROID)
# include "PlaybackStreamOboe.h"
#endif
namespace Audio {
// Instantiates the platform-specific playback backend selected at compile time
// (PulseAudio, AudioUnit or Oboe), or errors out on platforms without one.
ErrorOr<NonnullRefPtr<PlaybackStream>> PlaybackStream::create(OutputState initial_output_state, u32 sample_rate, u8 channels, u32 target_latency_ms, AudioDataRequestCallback&& data_request_callback)
{
    VERIFY(data_request_callback);
    // Create the platform-specific implementation for this stream.
#if defined(HAVE_PULSEAUDIO)
    return PlaybackStreamPulseAudio::create(initial_output_state, sample_rate, channels, target_latency_ms, move(data_request_callback));
#elif defined(AK_OS_MACOS)
    return PlaybackStreamAudioUnit::create(initial_output_state, sample_rate, channels, target_latency_ms, move(data_request_callback));
#elif defined(AK_OS_ANDROID)
    return PlaybackStreamOboe::create(initial_output_state, sample_rate, channels, target_latency_ms, move(data_request_callback));
#else
    // Silence unused-parameter warnings on platforms with no audio backend.
    (void)initial_output_state, (void)sample_rate, (void)channels, (void)target_latency_ms;
    return Error::from_string_literal("Audio output is not available for this platform");
#endif
}
}

View file

@ -0,0 +1,72 @@
/*
* Copyright (c) 2023, Gregory Bertilson <zaggy1024@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "SampleFormats.h"
#include <AK/AtomicRefCounted.h>
#include <AK/Function.h>
#include <AK/Queue.h>
#include <AK/Time.h>
#include <LibCore/Forward.h>
#include <LibThreading/ConditionVariable.h>
#include <LibThreading/MutexProtected.h>
#include <LibThreading/Thread.h>
namespace Audio {
// Whether a PlaybackStream is actively playing or suspended (paused).
enum class OutputState {
    Playing,
    Suspended,
};
// This class implements high-level audio playback behavior. It is primarily intended as an abstract cross-platform
// interface to be used by Ladybird (and its dependent libraries) for playback.
//
// The interface is designed to be simple and robust. All control functions can be called safely from any thread.
// Timing information provided by the class should allow audio timestamps to be tracked with the best accuracy possible.
class PlaybackStream : public AtomicRefCounted<PlaybackStream> {
public:
    // Fills `buffer` with up to `sample_count` samples in `format` and returns the
    // portion of `buffer` actually written.
    using AudioDataRequestCallback = Function<ReadonlyBytes(Bytes buffer, PcmSampleFormat format, size_t sample_count)>;

    // Creates a new audio Output class.
    //
    // The initial_output_state parameter determines whether it will begin playback immediately.
    //
    // The AudioDataRequestCallback will be called when the Output needs more audio data to fill
    // its buffers and continue playback.
    static ErrorOr<NonnullRefPtr<PlaybackStream>> create(OutputState initial_output_state, u32 sample_rate, u8 channels, u32 target_latency_ms, AudioDataRequestCallback&&);

    virtual ~PlaybackStream() = default;

    // Sets the callback function that will be fired whenever the server consumes more data than is made available
    // by the data request callback. It will fire when either the data request runs too long, or the data request
    // returns no data. If all the input data has been exhausted and this event fires, that means that playback
    // has ended.
    virtual void set_underrun_callback(Function<void()>) = 0;

    // Resume playback from the suspended state, requesting new data for audio buffers as soon as possible.
    //
    // The value provided to the promise resolution will match the `total_time_played()` at the exact moment that
    // the stream was resumed.
    virtual NonnullRefPtr<Core::ThreadedPromise<AK::Duration>> resume() = 0;
    // Completes playback of any buffered audio data and then suspends playback and buffering.
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> drain_buffer_and_suspend() = 0;
    // Drops any buffered audio data and then suspends playback and buffering. This can used be to stop playback
    // as soon as possible instead of waiting for remaining audio to play.
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> discard_buffer_and_suspend() = 0;

    // Returns a accurate monotonically-increasing time duration that is based on the number of samples that have
    // been played by the output device. The value is interpolated and takes into account latency to the speakers
    // whenever possible.
    //
    // This function should be able to run from any thread safely.
    virtual ErrorOr<AK::Duration> total_time_played() = 0;

    // Sets the output volume — presumably in the range [0.0, 1.0]; confirm per backend.
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> set_volume(double volume) = 0;
};
}

View file

@ -0,0 +1,391 @@
/*
* Copyright (c) 2023, Andrew Kaster <akaster@serenityos.org>
* Copyright (c) 2023, Tim Flynn <trflynn89@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "PlaybackStreamAudioUnit.h"
#include <AK/Atomic.h>
#include <AK/SourceLocation.h>
#include <LibCore/SharedCircularQueue.h>
#include <LibCore/ThreadedPromise.h>
#include <AudioUnit/AudioUnit.h>
namespace Audio {
static constexpr AudioUnitElement AUDIO_UNIT_OUTPUT_BUS = 0;
static void log_os_error_code(OSStatus error_code, SourceLocation location = SourceLocation::current());
// Evaluates a CoreAudio/AudioUnit expression; on a non-noErr status, logs the error
// code with its source location and returns it from the enclosing function as an Error.
#define AU_TRY(expression)                                                          \
    ({                                                                              \
        /* Ignore -Wshadow to allow nesting the macro. */                           \
        AK_IGNORE_DIAGNOSTIC("-Wshadow", auto&& _temporary_result = (expression)); \
        if (_temporary_result != noErr) [[unlikely]] {                              \
            log_os_error_code(_temporary_result);                                   \
            return Error::from_errno(_temporary_result);                            \
        }                                                                           \
    })
// A control request sent to the audio render callback via the shared task queue.
// The associated promise is resolved or rejected once the task has been applied.
struct AudioTask {
    enum class Type {
        Play,
        Pause,
        PauseAndDiscard,
        Volume,
    };

    // Resolves the pending promise; Duration-typed promises receive the stream time
    // at which the task took effect.
    void resolve(AK::Duration time)
    {
        promise.visit(
            [](Empty) { VERIFY_NOT_REACHED(); },
            [&](NonnullRefPtr<Core::ThreadedPromise<void>>& promise) {
                promise->resolve();
            },
            [&](NonnullRefPtr<Core::ThreadedPromise<AK::Duration>>& promise) {
                promise->resolve(move(time));
            });
    }

    // Logs the OSStatus and rejects the pending promise with it as an errno-style Error.
    void reject(OSStatus error)
    {
        log_os_error_code(error);
        promise.visit(
            [](Empty) { VERIFY_NOT_REACHED(); },
            [error](auto& promise) {
                promise->reject(Error::from_errno(error));
            });
    }

    Type type;
    Variant<Empty, NonnullRefPtr<Core::ThreadedPromise<void>>, NonnullRefPtr<Core::ThreadedPromise<AK::Duration>>> promise;
    // Extra payload for Volume tasks (the new volume level).
    Optional<double> data {};
};
class AudioState : public RefCounted<AudioState> {
public:
using AudioTaskQueue = Core::SharedSingleProducerCircularQueue<AudioTask>;
static ErrorOr<NonnullRefPtr<AudioState>> create(AudioStreamBasicDescription description, PlaybackStream::AudioDataRequestCallback data_request_callback, OutputState initial_output_state)
{
auto task_queue = TRY(AudioTaskQueue::create());
auto state = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) AudioState(description, move(task_queue), move(data_request_callback), initial_output_state)));
AudioComponentDescription component_description;
component_description.componentType = kAudioUnitType_Output;
component_description.componentSubType = kAudioUnitSubType_DefaultOutput;
component_description.componentManufacturer = kAudioUnitManufacturer_Apple;
component_description.componentFlags = 0;
component_description.componentFlagsMask = 0;
auto* component = AudioComponentFindNext(NULL, &component_description);
AU_TRY(AudioComponentInstanceNew(component, &state->m_audio_unit));
AU_TRY(AudioUnitSetProperty(
state->m_audio_unit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
AUDIO_UNIT_OUTPUT_BUS,
&description,
sizeof(description)));
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = &AudioState::on_audio_unit_buffer_request;
callbackStruct.inputProcRefCon = state.ptr();
AU_TRY(AudioUnitSetProperty(
state->m_audio_unit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
AUDIO_UNIT_OUTPUT_BUS,
&callbackStruct,
sizeof(callbackStruct)));
AU_TRY(AudioUnitInitialize(state->m_audio_unit));
AU_TRY(AudioOutputUnitStart(state->m_audio_unit));
return state;
}
~AudioState()
{
if (m_audio_unit != nullptr)
AudioOutputUnitStop(m_audio_unit);
}
ErrorOr<void> queue_task(AudioTask task)
{
return m_task_queue.blocking_enqueue(move(task), []() {
usleep(10'000);
});
}
AK::Duration last_sample_time() const
{
return AK::Duration::from_milliseconds(m_last_sample_time.load());
}
private:
AudioState(AudioStreamBasicDescription description, AudioTaskQueue task_queue, PlaybackStream::AudioDataRequestCallback data_request_callback, OutputState initial_output_state)
: m_description(description)
, m_task_queue(move(task_queue))
, m_paused(initial_output_state == OutputState::Playing ? Paused::No : Paused::Yes)
, m_data_request_callback(move(data_request_callback))
{
}
// Render callback invoked by the Audio Unit when it needs audio data.
// Records the current playback position, applies at most one queued control
// task, then either pulls data from the client callback or emits silence.
static OSStatus on_audio_unit_buffer_request(void* user_data, AudioUnitRenderActionFlags*, AudioTimeStamp const* time_stamp, UInt32 element, UInt32 frames_to_render, AudioBufferList* output_buffer_list)
{
    VERIFY(element == AUDIO_UNIT_OUTPUT_BUS);
    VERIFY(output_buffer_list->mNumberBuffers == 1);
    auto& state = *static_cast<AudioState*>(user_data);

    // Convert the unit's sample-time clock into milliseconds for total_time_played().
    VERIFY(time_stamp->mFlags & kAudioTimeStampSampleTimeValid);
    auto sample_time_seconds = time_stamp->mSampleTime / state.m_description.mSampleRate;
    auto last_sample_time = static_cast<i64>(sample_time_seconds * 1000.0);
    state.m_last_sample_time.store(last_sample_time);

    // Drain one control task per render pass; an empty queue is the normal case.
    if (auto result = state.m_task_queue.dequeue(); result.is_error()) {
        VERIFY(result.error() == AudioTaskQueue::QueueStatus::Empty);
    } else {
        auto task = result.release_value();
        OSStatus error = noErr;
        switch (task.type) {
        case AudioTask::Type::Play:
            state.m_paused = Paused::No;
            break;
        case AudioTask::Type::Pause:
            state.m_paused = Paused::Yes;
            break;
        case AudioTask::Type::PauseAndDiscard:
            // Resetting the unit drops any audio it has buffered internally.
            error = AudioUnitReset(state.m_audio_unit, kAudioUnitScope_Global, AUDIO_UNIT_OUTPUT_BUS);
            state.m_paused = Paused::Yes;
            break;
        case AudioTask::Type::Volume:
            VERIFY(task.data.has_value());
            error = AudioUnitSetParameter(state.m_audio_unit, kHALOutputParam_Volume, kAudioUnitScope_Global, 0, static_cast<float>(*task.data), 0);
            break;
        }
        // Settle the promise attached to the task with the position at which it took effect.
        if (error == noErr)
            task.resolve(AK::Duration::from_milliseconds(last_sample_time));
        else
            task.reject(error);
    }

    Bytes output_buffer {
        reinterpret_cast<u8*>(output_buffer_list->mBuffers[0].mData),
        output_buffer_list->mBuffers[0].mDataByteSize
    };

    if (state.m_paused == Paused::No) {
        auto written_bytes = state.m_data_request_callback(output_buffer, PcmSampleFormat::Float32, frames_to_render);
        // An empty result means the client has no more data; pause until resumed.
        if (written_bytes.is_empty())
            state.m_paused = Paused::Yes;
    }
    // When paused (including the just-ran-dry case above), output silence.
    if (state.m_paused == Paused::Yes)
        output_buffer.fill(0);

    return noErr;
}
// The output Audio Unit instance; null until create() succeeds.
AudioComponentInstance m_audio_unit { nullptr };
// Stream format the unit was configured with; used to convert sample time to wall time.
AudioStreamBasicDescription m_description {};
// Control tasks posted by the UI-facing methods, drained by the render callback.
AudioTaskQueue m_task_queue;

enum class Paused {
    Yes,
    No,
};
// Set at construction, then only updated from the render callback.
Paused m_paused { Paused::Yes };
// Pulls audio data from the client; invoked on the render thread.
PlaybackStream::AudioDataRequestCallback m_data_request_callback;
// Milliseconds of the most recently rendered sample, for total_time_played().
Atomic<i64> m_last_sample_time { 0 };
};
// Describes a packed, interleaved 32-bit float LPCM stream and builds the
// Audio Unit state for it. The target-latency argument is unused on this backend.
ErrorOr<NonnullRefPtr<PlaybackStream>> PlaybackStreamAudioUnit::create(OutputState initial_output_state, u32 sample_rate, u8 channels, u32, AudioDataRequestCallback&& data_request_callback)
{
    AudioStreamBasicDescription description {};
    description.mSampleRate = sample_rate;
    description.mFormatID = kAudioFormatLinearPCM;
    description.mFormatFlags = kLinearPCMFormatFlagIsFloat | kLinearPCMFormatFlagIsPacked;
    description.mFramesPerPacket = 1;
    description.mChannelsPerFrame = channels;
    description.mBitsPerChannel = sizeof(float) * 8;
    description.mBytesPerFrame = sizeof(float) * channels;
    description.mBytesPerPacket = description.mBytesPerFrame;

    auto state = TRY(AudioState::create(description, move(data_request_callback), initial_output_state));
    return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PlaybackStreamAudioUnit(move(state))));
}
// Adopts the shared state object that owns the Audio Unit and task queue.
PlaybackStreamAudioUnit::PlaybackStreamAudioUnit(NonnullRefPtr<AudioState> state)
    : m_state(move(state))
{
}
PlaybackStreamAudioUnit::~PlaybackStreamAudioUnit() = default;
// Underrun notification is not wired up on the Audio Unit backend yet;
// the callback is silently dropped.
void PlaybackStreamAudioUnit::set_underrun_callback(Function<void()>)
{
    // FIXME: Implement this.
}
// Queues a Play task; the promise resolves with the playback position at
// which the render callback applied it.
NonnullRefPtr<Core::ThreadedPromise<AK::Duration>> PlaybackStreamAudioUnit::resume()
{
    auto promise = Core::ThreadedPromise<AK::Duration>::create();
    AudioTask play_task { AudioTask::Type::Play, promise };
    auto queued = m_state->queue_task(move(play_task));
    if (queued.is_error())
        promise->reject(queued.release_error());
    return promise;
}
// Queues a Pause task; already-buffered audio is left intact.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamAudioUnit::drain_buffer_and_suspend()
{
    auto promise = Core::ThreadedPromise<void>::create();
    AudioTask pause_task { AudioTask::Type::Pause, promise };
    auto queued = m_state->queue_task(move(pause_task));
    if (queued.is_error())
        promise->reject(queued.release_error());
    return promise;
}
// Queues a PauseAndDiscard task, which also resets the unit to drop any
// audio it has buffered internally.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamAudioUnit::discard_buffer_and_suspend()
{
    auto promise = Core::ThreadedPromise<void>::create();
    AudioTask discard_task { AudioTask::Type::PauseAndDiscard, promise };
    auto queued = m_state->queue_task(move(discard_task));
    if (queued.is_error())
        promise->reject(queued.release_error());
    return promise;
}
// Returns the playback position as last observed by the render callback.
ErrorOr<AK::Duration> PlaybackStreamAudioUnit::total_time_played()
{
    return m_state->last_sample_time();
}
// Queues a Volume task carrying the new gain; applied on the render thread.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamAudioUnit::set_volume(double volume)
{
    auto promise = Core::ThreadedPromise<void>::create();
    AudioTask volume_task { AudioTask::Type::Volume, promise, volume };
    auto queued = m_state->queue_task(move(volume_task));
    if (queued.is_error())
        promise->reject(queued.release_error());
    return promise;
}
// In debug builds, logs a human-readable name for an Audio Unit OSStatus.
// The mapping covers the errors listed in AUComponent.h; anything else is
// reported as "Unknown error". A no-op in release builds.
void log_os_error_code([[maybe_unused]] OSStatus error_code, [[maybe_unused]] SourceLocation location)
{
#if AUDIO_DEBUG
    static constexpr struct {
        OSStatus code;
        StringView name;
    } known_errors[] = {
        { kAudioUnitErr_InvalidProperty, "InvalidProperty"sv },
        { kAudioUnitErr_InvalidParameter, "InvalidParameter"sv },
        { kAudioUnitErr_InvalidElement, "InvalidElement"sv },
        { kAudioUnitErr_NoConnection, "NoConnection"sv },
        { kAudioUnitErr_FailedInitialization, "FailedInitialization"sv },
        { kAudioUnitErr_TooManyFramesToProcess, "TooManyFramesToProcess"sv },
        { kAudioUnitErr_InvalidFile, "InvalidFile"sv },
        { kAudioUnitErr_UnknownFileType, "UnknownFileType"sv },
        { kAudioUnitErr_FileNotSpecified, "FileNotSpecified"sv },
        { kAudioUnitErr_FormatNotSupported, "FormatNotSupported"sv },
        { kAudioUnitErr_Uninitialized, "Uninitialized"sv },
        { kAudioUnitErr_InvalidScope, "InvalidScope"sv },
        { kAudioUnitErr_PropertyNotWritable, "PropertyNotWritable"sv },
        { kAudioUnitErr_CannotDoInCurrentContext, "CannotDoInCurrentContext"sv },
        { kAudioUnitErr_InvalidPropertyValue, "InvalidPropertyValue"sv },
        { kAudioUnitErr_PropertyNotInUse, "PropertyNotInUse"sv },
        { kAudioUnitErr_Initialized, "Initialized"sv },
        { kAudioUnitErr_InvalidOfflineRender, "InvalidOfflineRender"sv },
        { kAudioUnitErr_Unauthorized, "Unauthorized"sv },
        { kAudioUnitErr_MIDIOutputBufferFull, "MIDIOutputBufferFull"sv },
        { kAudioComponentErr_InstanceTimedOut, "InstanceTimedOut"sv },
        { kAudioComponentErr_InstanceInvalidated, "InstanceInvalidated"sv },
        { kAudioUnitErr_RenderTimeout, "RenderTimeout"sv },
        { kAudioUnitErr_ExtensionNotFound, "ExtensionNotFound"sv },
        { kAudioUnitErr_InvalidParameterValue, "InvalidParameterValue"sv },
        { kAudioUnitErr_InvalidFilePath, "InvalidFilePath"sv },
        { kAudioUnitErr_MissingKey, "MissingKey"sv },
    };

    auto error_string = "Unknown error"sv;
    for (auto const& entry : known_errors) {
        if (entry.code == error_code) {
            error_string = entry.name;
            break;
        }
    }

    warnln("{}: Audio Unit error {}: {}", location, error_code, error_string);
#endif
}
}

View file

@ -0,0 +1,39 @@
/*
* Copyright (c) 2023, Andrew Kaster <akaster@serenityos.org>
* Copyright (c) 2023, Tim Flynn <trflynn89@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "PlaybackStream.h"
#include <AK/Error.h>
#include <AK/NonnullRefPtr.h>
namespace Audio {
class AudioState;
// PlaybackStream backend for macOS, implemented on top of the system
// default-output Audio Unit. All control operations are forwarded to the
// render thread via an internal task queue owned by AudioState.
class PlaybackStreamAudioUnit final : public PlaybackStream {
public:
    // target_latency_ms is accepted for interface parity but unused by this backend.
    static ErrorOr<NonnullRefPtr<PlaybackStream>> create(OutputState initial_output_state, u32 sample_rate, u8 channels, u32 target_latency_ms, AudioDataRequestCallback&& data_request_callback);

    // FIXME: Not implemented on this backend; the callback is dropped.
    virtual void set_underrun_callback(Function<void()>) override;

    virtual NonnullRefPtr<Core::ThreadedPromise<AK::Duration>> resume() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> drain_buffer_and_suspend() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> discard_buffer_and_suspend() override;
    virtual ErrorOr<AK::Duration> total_time_played() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> set_volume(double) override;

private:
    explicit PlaybackStreamAudioUnit(NonnullRefPtr<AudioState>);
    ~PlaybackStreamAudioUnit();

    // Shared with the Audio Unit render callback.
    NonnullRefPtr<AudioState> m_state;
};
}

View file

@ -0,0 +1,157 @@
/*
* Copyright (c) 2024, Olekoop <mlglol360xd@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "PlaybackStreamOboe.h"
#include <AK/Atomic.h>
#include <AK/SourceLocation.h>
#include <LibCore/SharedCircularQueue.h>
#include <LibCore/ThreadedPromise.h>
#include <memory>
#include <oboe/Oboe.h>
namespace Audio {
class OboeCallback : public oboe::AudioStreamDataCallback {
public:
virtual oboe::DataCallbackResult onAudioReady(oboe::AudioStream* oboeStream, void* audioData, int32_t numFrames) override
{
Bytes output_buffer {
reinterpret_cast<u8*>(audioData),
static_cast<size_t>(numFrames * oboeStream->getChannelCount() * sizeof(float))
};
auto written_bytes = m_data_request_callback(output_buffer, PcmSampleFormat::Float32, numFrames);
if (written_bytes.is_empty())
return oboe::DataCallbackResult::Stop;
auto timestamp = oboeStream->getTimestamp(CLOCK_MONOTONIC);
if (timestamp == oboe::Result::OK) {
m_number_of_samples_enqueued = timestamp.value().position;
} else {
// Fallback for OpenSLES
m_number_of_samples_enqueued += numFrames;
}
auto last_sample_time = static_cast<i64>(m_number_of_samples_enqueued / oboeStream->getSampleRate());
m_last_sample_time.store(last_sample_time);
float* output = (float*)audioData;
for (int frames = 0; frames < numFrames; frames++) {
for (int channels = 0; channels < oboeStream->getChannelCount(); channels++) {
*output++ *= m_volume.load();
}
}
return oboe::DataCallbackResult::Continue;
}
OboeCallback(PlaybackStream::AudioDataRequestCallback data_request_callback)
: m_data_request_callback(move(data_request_callback))
{
}
AK::Duration last_sample_time() const
{
return AK::Duration::from_seconds(m_last_sample_time.load());
}
void set_volume(float volume)
{
m_volume.store(volume);
}
private:
PlaybackStream::AudioDataRequestCallback m_data_request_callback;
Atomic<i64> m_last_sample_time { 0 };
size_t m_number_of_samples_enqueued { 0 };
Atomic<float> m_volume { 1.0 };
};
// Keeps the Oboe stream and its data callback alive together for as long as
// the playback stream exists.
class PlaybackStreamOboe::Storage : public RefCounted<PlaybackStreamOboe::Storage> {
public:
    Storage(std::shared_ptr<oboe::AudioStream> audio_stream, std::shared_ptr<OboeCallback> data_callback)
        : m_stream(move(audio_stream))
        , m_oboe_callback(move(data_callback))
    {
    }

    std::shared_ptr<oboe::AudioStream> stream() const { return m_stream; }
    std::shared_ptr<OboeCallback> oboe_callback() const { return m_oboe_callback; }

private:
    std::shared_ptr<oboe::AudioStream> m_stream;
    std::shared_ptr<OboeCallback> m_oboe_callback;
};
// Adopts the storage that keeps the Oboe stream and callback alive.
PlaybackStreamOboe::PlaybackStreamOboe(NonnullRefPtr<Storage> state)
    : m_storage(move(state))
{
}
// Opens a shared, low-latency float stream via Oboe and wires it to the
// client's data request callback. The target-latency argument is unused;
// Oboe's LowLatency performance mode manages buffering itself.
ErrorOr<NonnullRefPtr<PlaybackStream>> PlaybackStreamOboe::create(OutputState initial_output_state, u32 sample_rate, u8 channels, u32, AudioDataRequestCallback&& data_request_callback)
{
    std::shared_ptr<oboe::AudioStream> stream;
    auto oboe_callback = std::make_shared<OboeCallback>(move(data_request_callback));

    oboe::AudioStreamBuilder builder;
    auto result = builder.setSharingMode(oboe::SharingMode::Shared)
                      ->setPerformanceMode(oboe::PerformanceMode::LowLatency)
                      ->setFormat(oboe::AudioFormat::Float)
                      ->setDataCallback(oboe_callback)
                      ->setChannelCount(channels)
                      ->setSampleRate(sample_rate)
                      ->openStream(stream);
    if (result != oboe::Result::OK)
        return Error::from_string_literal("Oboe failed to start");

    if (initial_output_state == OutputState::Playing)
        stream->requestStart();

    // NOTE: new (nothrow) so an allocation failure yields ENOMEM via
    // adopt_nonnull_ref_or_enomem instead of throwing/aborting (the plain
    // `new` here was inconsistent with the line below).
    auto storage = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PlaybackStreamOboe::Storage(move(stream), move(oboe_callback))));
    return TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PlaybackStreamOboe(move(storage))));
}
PlaybackStreamOboe::~PlaybackStreamOboe() = default;
// Underrun notification is not wired up on the Oboe backend yet;
// the callback is silently dropped.
void PlaybackStreamOboe::set_underrun_callback(Function<void()>)
{
    // FIXME: Implement this.
}
// Starts (or restarts) the stream and resolves with the playback position
// captured just before start was requested.
NonnullRefPtr<Core::ThreadedPromise<AK::Duration>> PlaybackStreamOboe::resume()
{
    auto promise = Core::ThreadedPromise<AK::Duration>::create();
    auto position_before_start = MUST(total_time_played());
    m_storage->stream()->start();
    promise->resolve(move(position_before_start));
    return promise;
}
// Stops the stream; resolves immediately once stop has been issued.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamOboe::drain_buffer_and_suspend()
{
    auto promise = Core::ThreadedPromise<void>::create();
    auto stream = m_storage->stream();
    stream->stop();
    promise->resolve();
    return promise;
}
// Pauses the stream and flushes any buffered audio so it won't be played on resume.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamOboe::discard_buffer_and_suspend()
{
    auto promise = Core::ThreadedPromise<void>::create();
    auto stream = m_storage->stream();
    stream->pause();
    stream->flush();
    promise->resolve();
    return promise;
}
// Returns the playback position as tracked by the Oboe data callback
// (whole-second granularity).
ErrorOr<AK::Duration> PlaybackStreamOboe::total_time_played()
{
    return m_storage->oboe_callback()->last_sample_time();
}
// Applies the new volume on the data callback. PlaybackStream expresses
// volume as a double while OboeCallback stores a float, so convert
// explicitly (matching the AudioUnit backend) instead of narrowing implicitly.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamOboe::set_volume(double volume)
{
    auto promise = Core::ThreadedPromise<void>::create();
    m_storage->oboe_callback()->set_volume(static_cast<float>(volume));
    promise->resolve();
    return promise;
}
}

View file

@ -0,0 +1,36 @@
/*
* Copyright (c) 2024, Olekoop <mlglol360xd@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "PlaybackStream.h"
#include <AK/Error.h>
#include <AK/NonnullRefPtr.h>
namespace Audio {
// PlaybackStream backend for Android, implemented on top of the Oboe library
// (AAudio with an OpenSLES fallback).
class PlaybackStreamOboe final : public PlaybackStream {
public:
    // target_latency_ms is accepted for interface parity but unused by this backend.
    static ErrorOr<NonnullRefPtr<PlaybackStream>> create(OutputState initial_output_state, u32 sample_rate, u8 channels, u32 target_latency_ms, AudioDataRequestCallback&& data_request_callback);

    // FIXME: Not implemented on this backend; the callback is dropped.
    virtual void set_underrun_callback(Function<void()>) override;

    virtual NonnullRefPtr<Core::ThreadedPromise<AK::Duration>> resume() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> drain_buffer_and_suspend() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> discard_buffer_and_suspend() override;
    virtual ErrorOr<AK::Duration> total_time_played() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> set_volume(double) override;

private:
    // Keeps the Oboe stream and data callback alive together.
    class Storage;
    explicit PlaybackStreamOboe(NonnullRefPtr<Storage>);
    ~PlaybackStreamOboe();
    RefPtr<Storage> m_storage;
};
}

View file

@ -0,0 +1,181 @@
/*
* Copyright (c) 2023, Gregory Bertilson <zaggy1024@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "PlaybackStreamPulseAudio.h"
#include <LibCore/ThreadedPromise.h>
namespace Audio {
// Evaluates `expression` on the PulseAudio control thread; on error, logs the
// failure, marks the thread's `internal_state` (expected to be in scope) as
// exited, and leaves the thread with a nonzero exit code.
#define TRY_OR_EXIT_THREAD(expression) \
    ({ \
        auto&& __temporary_result = (expression); \
        if (__temporary_result.is_error()) [[unlikely]] { \
            warnln("Failure in PulseAudio control thread: {}", __temporary_result.error().string_literal()); \
            internal_state->exit(); \
            return 1; \
        } \
        __temporary_result.release_value(); \
    })
// Creates the playback stream and spawns a detached control thread that owns
// the PulseAudio connection. The shared InternalState outlives the stream so
// the detached thread never touches freed memory.
ErrorOr<NonnullRefPtr<PlaybackStream>> PlaybackStreamPulseAudio::create(OutputState initial_state, u32 sample_rate, u8 channels, u32 target_latency_ms, AudioDataRequestCallback&& data_request_callback)
{
    VERIFY(data_request_callback);
    // Create an internal state for the control thread to hold on to.
    auto internal_state = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) InternalState()));
    auto playback_stream = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PlaybackStreamPulseAudio(internal_state)));

    // Create the control thread and start it.
    // NOTE: The lambda captures internal_state by value (ref-counted), keeping it alive
    // for the detached thread's lifetime.
    auto thread = TRY(Threading::Thread::try_create([=, data_request_callback = move(data_request_callback)]() mutable {
        auto context = TRY_OR_EXIT_THREAD(PulseAudioContext::instance());
        internal_state->set_stream(TRY_OR_EXIT_THREAD(context->create_stream(initial_state, sample_rate, channels, target_latency_ms, [data_request_callback = move(data_request_callback)](PulseAudioStream&, Bytes buffer, size_t sample_count) {
            return data_request_callback(buffer, PcmSampleFormat::Float32, sample_count);
        })));

        // PulseAudio retains the last volume it sets for an application. We want to consistently
        // start at 100% volume instead.
        TRY_OR_EXIT_THREAD(internal_state->stream()->set_volume(1.0));

        // Process enqueued tasks until exit() is called.
        internal_state->thread_loop();
        return 0;
    },
        "Audio::PlaybackStream"sv));
    thread->start();
    thread->detach();

    return playback_stream;
}
// Private: use create(), which also spawns the control thread.
PlaybackStreamPulseAudio::PlaybackStreamPulseAudio(NonnullRefPtr<InternalState> state)
    : m_state(move(state))
{
}
// Signals the control thread's loop to finish; the thread (and InternalState)
// clean themselves up afterwards.
PlaybackStreamPulseAudio::~PlaybackStreamPulseAudio()
{
    m_state->exit();
}
// Evaluates `expression`; on error, rejects `promise` (expected to be in
// scope) with the error and returns the optional trailing argument(s).
#define TRY_OR_REJECT(expression, ...) \
    ({ \
        auto&& __temporary_result = (expression); \
        if (__temporary_result.is_error()) [[unlikely]] { \
            promise->reject(__temporary_result.release_error()); \
            return __VA_ARGS__; \
        } \
        __temporary_result.release_value(); \
    })
void PlaybackStreamPulseAudio::set_underrun_callback(Function<void()> callback)
{
m_state->enqueue([this, callback = move(callback)]() mutable {
m_state->stream()->set_underrun_callback(move(callback));
});
}
// Resumes playback on the control thread and resolves with the position
// at which playback continued.
NonnullRefPtr<Core::ThreadedPromise<AK::Duration>> PlaybackStreamPulseAudio::resume()
{
    auto promise = Core::ThreadedPromise<AK::Duration>::create();
    TRY_OR_REJECT(m_state->check_is_running(), promise);
    m_state->enqueue([this, promise]() {
        auto stream = m_state->stream();
        TRY_OR_REJECT(stream->resume());
        promise->resolve(TRY_OR_REJECT(stream->total_time_played()));
    });
    return promise;
}
// Plays out any buffered audio, then suspends the stream; runs on the control thread.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamPulseAudio::drain_buffer_and_suspend()
{
    auto promise = Core::ThreadedPromise<void>::create();
    TRY_OR_REJECT(m_state->check_is_running(), promise);
    m_state->enqueue([this, promise]() {
        auto stream = m_state->stream();
        TRY_OR_REJECT(stream->drain_and_suspend());
        promise->resolve();
    });
    return promise;
}
// Drops any buffered audio, then suspends the stream; runs on the control thread.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamPulseAudio::discard_buffer_and_suspend()
{
    auto promise = Core::ThreadedPromise<void>::create();
    TRY_OR_REJECT(m_state->check_is_running(), promise);
    m_state->enqueue([this, promise]() {
        auto stream = m_state->stream();
        TRY_OR_REJECT(stream->flush_and_suspend());
        promise->resolve();
    });
    return promise;
}
// Reports the stream's playback position, or zero while the control thread
// has not created the stream yet.
ErrorOr<AK::Duration> PlaybackStreamPulseAudio::total_time_played()
{
    auto stream = m_state->stream();
    if (stream == nullptr)
        return AK::Duration::zero();
    return stream->total_time_played();
}
// Applies the new volume on the control thread.
NonnullRefPtr<Core::ThreadedPromise<void>> PlaybackStreamPulseAudio::set_volume(double volume)
{
    auto promise = Core::ThreadedPromise<void>::create();
    TRY_OR_REJECT(m_state->check_is_running(), promise);
    m_state->enqueue([this, promise, volume]() {
        auto stream = m_state->stream();
        TRY_OR_REJECT(stream->set_volume(volume));
        promise->resolve();
    });
    return promise;
}
// Succeeds while the control thread's task loop is still accepting work.
ErrorOr<void> PlaybackStreamPulseAudio::InternalState::check_is_running()
{
    if (!m_exit)
        return {};
    return Error::from_string_literal("PulseAudio control thread loop is not running");
}
// Called once by the control thread after the stream has been created.
void PlaybackStreamPulseAudio::InternalState::set_stream(NonnullRefPtr<PulseAudioStream> const& stream)
{
    m_stream = stream;
}
// Null until the control thread has created the stream.
RefPtr<PulseAudioStream> PlaybackStreamPulseAudio::InternalState::stream()
{
    return m_stream;
}
// Queues a task for the control thread and wakes it. Safe to call from any thread.
void PlaybackStreamPulseAudio::InternalState::enqueue(Function<void()>&& task)
{
    Threading::MutexLocker locker { m_mutex };
    // `task` is a plain rvalue reference (not a forwarding reference), so the
    // idiomatic way to transfer it is move(), not forward<Function<void()>>().
    m_tasks.enqueue(move(task));
    m_wake_condition.signal();
}
// Control-thread main loop: sleeps on the condition variable until a task is
// enqueued or exit() is signalled, then runs tasks outside the lock.
void PlaybackStreamPulseAudio::InternalState::thread_loop()
{
    while (true) {
        // Dequeue under the mutex; a null task signals that exit() was called.
        auto task = [this]() -> Function<void()> {
            Threading::MutexLocker locker { m_mutex };
            while (m_tasks.is_empty() && !m_exit)
                m_wake_condition.wait();
            if (m_exit)
                return nullptr;
            return m_tasks.dequeue();
        }();
        if (!task) {
            VERIFY(m_exit);
            break;
        }
        // Run the task with the mutex released so enqueue() never blocks on it.
        task();
    }
}
// Asks the control thread to leave thread_loop(); safe from any thread.
void PlaybackStreamPulseAudio::InternalState::exit()
{
    m_exit = true;
    m_wake_condition.signal();
}
}

View file

@ -0,0 +1,58 @@
/*
* Copyright (c) 2023, Gregory Bertilson <zaggy1024@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "PlaybackStream.h"
#include "PulseAudioWrappers.h"
namespace Audio {
// PlaybackStream backend for Linux/PulseAudio. All PulseAudio calls happen on
// a dedicated, detached control thread; public methods enqueue tasks for it
// and return promises.
class PlaybackStreamPulseAudio final
    : public PlaybackStream {
public:
    static ErrorOr<NonnullRefPtr<PlaybackStream>> create(OutputState initial_state, u32 sample_rate, u8 channels, u32 target_latency_ms, AudioDataRequestCallback&& data_request_callback);

    virtual void set_underrun_callback(Function<void()>) override;

    virtual NonnullRefPtr<Core::ThreadedPromise<AK::Duration>> resume() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> drain_buffer_and_suspend() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> discard_buffer_and_suspend() override;
    virtual ErrorOr<AK::Duration> total_time_played() override;
    virtual NonnullRefPtr<Core::ThreadedPromise<void>> set_volume(double) override;

private:
    // This struct is kept alive until the control thread exits to prevent a use-after-free without blocking on
    // the UI thread.
    class InternalState : public AtomicRefCounted<InternalState> {
    public:
        void set_stream(NonnullRefPtr<PulseAudioStream> const&);
        RefPtr<PulseAudioStream> stream();
        // Posts a task to the control thread.
        void enqueue(Function<void()>&&);
        // Control-thread main loop; returns after exit() is called.
        void thread_loop();
        // Errors out once exit() has been requested.
        ErrorOr<void> check_is_running();
        void exit();

    private:
        RefPtr<PulseAudioStream> m_stream { nullptr };
        // Pending control tasks, guarded by m_mutex.
        Queue<Function<void()>> m_tasks;
        Threading::Mutex m_mutex;
        Threading::ConditionVariable m_wake_condition { m_mutex };
        Atomic<bool> m_exit { false };
    };

    PlaybackStreamPulseAudio(NonnullRefPtr<InternalState>);
    ~PlaybackStreamPulseAudio();

    RefPtr<InternalState> m_state;
};
}

View file

@ -0,0 +1,499 @@
/*
* Copyright (c) 2023, Gregory Bertilson <zaggy1024@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "PulseAudioWrappers.h"
#include <AK/WeakPtr.h>
#include <LibThreading/Mutex.h>
namespace Audio {
// Process-wide weak handle to the shared context; see instance() for creation.
WeakPtr<PulseAudioContext> PulseAudioContext::weak_instance()
{
    // Use a weak pointer to allow the context to be shut down if we stop outputting audio.
    static WeakPtr<PulseAudioContext> the_instance;
    return the_instance;
}
// Returns the shared PulseAudio context, creating and connecting it on first
// use (or after all previous users released it). Blocks until the daemon
// connection reaches Ready, or returns an error if it fails.
ErrorOr<NonnullRefPtr<PulseAudioContext>> PulseAudioContext::instance()
{
    static Threading::Mutex instantiation_mutex;
    // Lock and unlock the mutex to ensure that the mutex is fully unlocked at application
    // exit.
    atexit([]() {
        instantiation_mutex.lock();
        instantiation_mutex.unlock();
    });
    auto instantiation_locker = Threading::MutexLocker(instantiation_mutex);

    auto the_instance = weak_instance();
    RefPtr<PulseAudioContext> strong_instance_pointer = the_instance.strong_ref();
    // A null strong ref means nobody is keeping the context alive; build a fresh one.
    if (strong_instance_pointer == nullptr) {
        auto* main_loop = pa_threaded_mainloop_new();
        if (main_loop == nullptr)
            return Error::from_string_literal("Failed to create PulseAudio main loop");
        auto* api = pa_threaded_mainloop_get_api(main_loop);
        if (api == nullptr)
            return Error::from_string_literal("Failed to get PulseAudio API");
        auto* context = pa_context_new(api, "Ladybird");
        if (context == nullptr)
            return Error::from_string_literal("Failed to get PulseAudio connection context");
        strong_instance_pointer = make_ref_counted<PulseAudioContext>(main_loop, api, context);

        // Set a callback to signal ourselves to wake when the state changes, so that we can
        // synchronously wait for the connection.
        pa_context_set_state_callback(
            context, [](pa_context*, void* user_data) {
                static_cast<PulseAudioContext*>(user_data)->signal_to_wake();
            },
            strong_instance_pointer.ptr());
        if (auto error = pa_context_connect(context, nullptr, PA_CONTEXT_NOFLAGS, nullptr); error < 0) {
            warnln("Starting PulseAudio context connection failed with error: {}", pulse_audio_error_to_string(static_cast<PulseAudioErrorCode>(-error)));
            return Error::from_string_literal("Error while starting PulseAudio daemon connection");
        }
        if (auto error = pa_threaded_mainloop_start(main_loop); error < 0) {
            warnln("Starting PulseAudio main loop failed with error: {}", pulse_audio_error_to_string(static_cast<PulseAudioErrorCode>(-error)));
            return Error::from_string_literal("Failed to start PulseAudio main loop");
        }

        {
            auto locker = strong_instance_pointer->main_loop_locker();
            // Wait on the main loop's condition variable until the context is
            // Ready or the connection has failed.
            while (true) {
                bool is_ready = false;
                switch (strong_instance_pointer->get_connection_state()) {
                case PulseAudioContextState::Connecting:
                case PulseAudioContextState::Authorizing:
                case PulseAudioContextState::SettingName:
                    break;
                case PulseAudioContextState::Ready:
                    is_ready = true;
                    break;
                case PulseAudioContextState::Failed:
                    warnln("PulseAudio server connection failed with error: {}", pulse_audio_error_to_string(strong_instance_pointer->get_last_error()));
                    return Error::from_string_literal("Failed to connect to PulseAudio server");
                case PulseAudioContextState::Unconnected:
                case PulseAudioContextState::Terminated:
                    VERIFY_NOT_REACHED();
                    break;
                }
                if (is_ready)
                    break;
                strong_instance_pointer->wait_for_signal();
            }
            // The wake-up callback is no longer needed once we are connected.
            pa_context_set_state_callback(context, nullptr, nullptr);
        }

        the_instance = strong_instance_pointer;
    }

    return strong_instance_pointer.release_nonnull();
}
// Takes ownership of the main loop and context created by instance().
PulseAudioContext::PulseAudioContext(pa_threaded_mainloop* main_loop, pa_mainloop_api* api, pa_context* context)
    : m_main_loop(main_loop)
    , m_api(api)
    , m_context(context)
{
}
// Disconnects and releases the context (under the main-loop lock), then
// stops and frees the threaded main loop.
PulseAudioContext::~PulseAudioContext()
{
    {
        auto locker = main_loop_locker();
        pa_context_disconnect(m_context);
        pa_context_unref(m_context);
    }
    pa_threaded_mainloop_stop(m_main_loop);
    pa_threaded_mainloop_free(m_main_loop);
}
// True when called from the PulseAudio main-loop thread itself.
bool PulseAudioContext::current_thread_is_main_loop_thread()
{
    return pa_threaded_mainloop_in_thread(m_main_loop) != 0;
}
// Locks the main loop, except on the main-loop thread itself where the lock
// is already held (pa_threaded_mainloop_lock would deadlock there).
void PulseAudioContext::lock_main_loop()
{
    if (!current_thread_is_main_loop_thread())
        pa_threaded_mainloop_lock(m_main_loop);
}
// Counterpart to lock_main_loop(); a no-op on the main-loop thread.
void PulseAudioContext::unlock_main_loop()
{
    if (!current_thread_is_main_loop_thread())
        pa_threaded_mainloop_unlock(m_main_loop);
}
// Blocks on the main loop's condition variable; caller must hold the main-loop lock.
void PulseAudioContext::wait_for_signal()
{
    pa_threaded_mainloop_wait(m_main_loop);
}
// Wakes any thread blocked in wait_for_signal().
void PulseAudioContext::signal_to_wake()
{
    pa_threaded_mainloop_signal(m_main_loop, 0);
}
// Maps pa_context_get_state() onto our enum.
PulseAudioContextState PulseAudioContext::get_connection_state()
{
    return static_cast<PulseAudioContextState>(pa_context_get_state(m_context));
}
// True while the context is connecting or connected (PA_CONTEXT_IS_GOOD).
bool PulseAudioContext::connection_is_good()
{
    return PA_CONTEXT_IS_GOOD(pa_context_get_state(m_context));
}
// Last error reported by the context (pa_context_errno).
PulseAudioErrorCode PulseAudioContext::get_last_error()
{
    return static_cast<PulseAudioErrorCode>(pa_context_errno(m_context));
}
// Expands to a pa success-callback + userdata pair that wakes the context's
// main-loop waiters when the operation completes.
#define STREAM_SIGNAL_CALLBACK(stream) \
    [](auto*, int, void* user_data) { \
        static_cast<PulseAudioStream*>(user_data)->m_context->signal_to_wake(); \
    }, \
        (stream)
// Creates and connects a float32 playback stream with buffer sizes derived
// from target_latency_ms, wiring the write/underflow callbacks to the given
// data request callback. Blocks until the stream is Ready or fails.
ErrorOr<NonnullRefPtr<PulseAudioStream>> PulseAudioContext::create_stream(OutputState initial_state, u32 sample_rate, u8 channels, u32 target_latency_ms, PulseAudioDataRequestCallback write_callback)
{
    auto locker = main_loop_locker();

    VERIFY(get_connection_state() == PulseAudioContextState::Ready);

    pa_sample_spec sample_specification {
        // FIXME: Support more audio sample types.
        __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ ? PA_SAMPLE_FLOAT32LE : PA_SAMPLE_FLOAT32BE,
        sample_rate,
        channels,
    };

    // Check the sample specification and channel map here. These are also checked by stream_new(),
    // but we can return a more accurate error if we check beforehand.
    if (pa_sample_spec_valid(&sample_specification) == 0)
        return Error::from_string_literal("PulseAudio sample specification is invalid");

    pa_channel_map channel_map;
    if (pa_channel_map_init_auto(&channel_map, sample_specification.channels, PA_CHANNEL_MAP_DEFAULT) == 0) {
        warnln("Getting default PulseAudio channel map failed with error: {}", pulse_audio_error_to_string(get_last_error()));
        return Error::from_string_literal("Failed to get default PulseAudio channel map");
    }

    // Create the stream object and set a callback to signal ourselves to wake when the stream changes states,
    // allowing us to wait synchronously for it to become Ready or Failed.
    auto* stream = pa_stream_new_with_proplist(m_context, "Audio Stream", &sample_specification, &channel_map, nullptr);
    if (stream == nullptr) {
        warnln("Instantiating PulseAudio stream failed with error: {}", pulse_audio_error_to_string(get_last_error()));
        return Error::from_string_literal("Failed to create PulseAudio stream");
    }
    pa_stream_set_state_callback(
        stream, [](pa_stream*, void* user_data) {
            static_cast<PulseAudioContext*>(user_data)->signal_to_wake();
        },
        this);

    auto stream_wrapper = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PulseAudioStream(NonnullRefPtr(*this), stream)));
    stream_wrapper->m_write_callback = move(write_callback);

    // Forward PulseAudio's write requests to the wrapper (and through it, to the client).
    pa_stream_set_write_callback(
        stream, [](pa_stream* stream, size_t bytes_to_write, void* user_data) {
            auto& stream_wrapper = *static_cast<PulseAudioStream*>(user_data);
            VERIFY(stream_wrapper.m_stream == stream);
            stream_wrapper.on_write_requested(bytes_to_write);
        },
        stream_wrapper.ptr());

    // Borrowing logic from cubeb to set reasonable buffer sizes for a target latency:
    // https://searchfox.org/mozilla-central/rev/3b707c8fd7e978eebf24279ee51ccf07895cfbcb/third_party/rust/cubeb-sys/libcubeb/src/cubeb_pulse.c#910-927
    pa_buffer_attr buffer_attributes;
    buffer_attributes.maxlength = -1;
    buffer_attributes.prebuf = -1;
    buffer_attributes.tlength = target_latency_ms * sample_rate / 1000;
    buffer_attributes.minreq = buffer_attributes.tlength / 4;
    buffer_attributes.fragsize = buffer_attributes.minreq;

    auto flags = static_cast<pa_stream_flags>(PA_STREAM_AUTO_TIMING_UPDATE | PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_RELATIVE_VOLUME);
    // Start corked (paused) when the caller asked for a suspended stream.
    if (initial_state == OutputState::Suspended) {
        stream_wrapper->m_suspended = true;
        flags = static_cast<pa_stream_flags>(static_cast<u32>(flags) | PA_STREAM_START_CORKED);
    }

    // This is a workaround for an issue with starting the stream corked, see PulseAudioPlaybackStream::total_time_played().
    pa_stream_set_started_callback(
        stream, [](pa_stream* stream, void* user_data) {
            static_cast<PulseAudioStream*>(user_data)->m_started_playback = true;
            pa_stream_set_started_callback(stream, nullptr, nullptr);
        },
        stream_wrapper.ptr());

    pa_stream_set_underflow_callback(
        stream, [](pa_stream*, void* user_data) {
            auto& stream = *static_cast<PulseAudioStream*>(user_data);
            if (stream.m_underrun_callback)
                stream.m_underrun_callback();
        },
        stream_wrapper.ptr());

    if (auto error = pa_stream_connect_playback(stream, nullptr, &buffer_attributes, flags, nullptr, nullptr); error != 0) {
        warnln("Failed to start PulseAudio stream connection with error: {}", pulse_audio_error_to_string(static_cast<PulseAudioErrorCode>(error)));
        return Error::from_string_literal("Error while connecting the PulseAudio stream");
    }

    // Wait until the stream is Ready (or fails), mirroring the context handshake above.
    while (true) {
        bool is_ready = false;
        switch (stream_wrapper->get_connection_state()) {
        case PulseAudioStreamState::Creating:
            break;
        case PulseAudioStreamState::Ready:
            is_ready = true;
            break;
        case PulseAudioStreamState::Failed:
            warnln("PulseAudio stream connection failed with error: {}", pulse_audio_error_to_string(get_last_error()));
            return Error::from_string_literal("Failed to connect to PulseAudio daemon");
        case PulseAudioStreamState::Unconnected:
        case PulseAudioStreamState::Terminated:
            VERIFY_NOT_REACHED();
            break;
        }
        if (is_ready)
            break;
        wait_for_signal();
    }
    pa_stream_set_state_callback(stream, nullptr, nullptr);

    return stream_wrapper;
}
// Detaches all callbacks before disconnecting so PulseAudio cannot call back
// into a half-destroyed object, then releases the stream.
PulseAudioStream::~PulseAudioStream()
{
    auto locker = m_context->main_loop_locker();

    pa_stream_set_write_callback(m_stream, nullptr, nullptr);
    pa_stream_set_underflow_callback(m_stream, nullptr, nullptr);
    pa_stream_set_started_callback(m_stream, nullptr, nullptr);
    pa_stream_disconnect(m_stream);
    pa_stream_unref(m_stream);
}
// Maps pa_stream_get_state() onto our enum.
PulseAudioStreamState PulseAudioStream::get_connection_state()
{
    return static_cast<PulseAudioStreamState>(pa_stream_get_state(m_stream));
}
// True while the stream is being created or is ready (PA_STREAM_IS_GOOD).
bool PulseAudioStream::connection_is_good()
{
    return PA_STREAM_IS_GOOD(pa_stream_get_state(m_stream));
}
// Installs the handler invoked by the underflow callback registered in create_stream().
void PulseAudioStream::set_underrun_callback(Function<void()> callback)
{
    auto locker = m_context->main_loop_locker();
    m_underrun_callback = move(callback);
}
// Sample rate of the negotiated stream format, in Hz.
u32 PulseAudioStream::sample_rate()
{
    return pa_stream_get_sample_spec(m_stream)->rate;
}
// Size of a single sample of the negotiated format, in bytes.
size_t PulseAudioStream::sample_size()
{
    return pa_sample_size(pa_stream_get_sample_spec(m_stream));
}
// Size of one frame (one sample per channel), in bytes.
size_t PulseAudioStream::frame_size()
{
    return pa_frame_size(pa_stream_get_sample_spec(m_stream));
}
// Number of channels in the negotiated stream format.
u8 PulseAudioStream::channel_count()
{
    return pa_stream_get_sample_spec(m_stream)->channels;
}
// Called from the write callback when PulseAudio wants bytes_to_write more
// bytes. Repeatedly borrows a server buffer, fills it via the client
// callback, and commits it; stops early when the client has no more data.
void PulseAudioStream::on_write_requested(size_t bytes_to_write)
{
    VERIFY(m_write_callback);
    // While suspended, leave the request unanswered; PulseAudio will ask again on resume.
    if (m_suspended)
        return;

    while (bytes_to_write > 0) {
        auto buffer = begin_write(bytes_to_write).release_value_but_fixme_should_propagate_errors();
        auto frame_size = this->frame_size();
        VERIFY(buffer.size() % frame_size == 0);
        auto written_buffer = m_write_callback(*this, buffer, buffer.size() / frame_size);
        // An empty result means the client ran out of data; return the borrowed buffer unused.
        if (written_buffer.size() == 0) {
            cancel_write().release_value_but_fixme_should_propagate_errors();
            break;
        }
        bytes_to_write -= written_buffer.size();
        write(written_buffer).release_value_but_fixme_should_propagate_errors();
    }
}
// Asks PulseAudio for a writable region of the playback buffer. The returned
// span must later be handed to write() or released via cancel_write().
// NOTE: the default argument (declared in the header) is size_t max, which
//       PulseAudio interprets as "pick a suitable size".
ErrorOr<Bytes> PulseAudioStream::begin_write(size_t bytes_to_write)
{
    void* write_pointer = nullptr;
    size_t write_size = bytes_to_write;
    auto const begin_result = pa_stream_begin_write(m_stream, &write_pointer, &write_size);
    if (begin_result != 0 || write_pointer == nullptr)
        return Error::from_string_literal("Failed to get the playback stream's write buffer from PulseAudio");
    return Bytes { write_pointer, write_size };
}
// Submits a filled buffer to the playback stream at the current write position.
ErrorOr<void> PulseAudioStream::write(ReadonlyBytes data)
{
    auto const write_result = pa_stream_write(m_stream, data.data(), data.size(), nullptr, 0, PA_SEEK_RELATIVE);
    if (write_result != 0)
        return Error::from_string_literal("Failed to write data to PulseAudio playback stream");
    return {};
}
// Releases a buffer previously obtained via begin_write() without writing it.
ErrorOr<void> PulseAudioStream::cancel_write()
{
    // NOTE: The previous error message was copy-pasted from begin_write(); this one
    //       describes the actual failure.
    if (pa_stream_cancel_write(m_stream) != 0)
        return Error::from_string_literal("Failed to cancel the playback stream's pending write in PulseAudio");
    return {};
}
// Returns whether the stream is currently suspended; while suspended, the
// write callback is not allowed to run (see on_write_requested()).
bool PulseAudioStream::is_suspended() const
{
    return m_suspended;
}
// Maps a PulseAudio error code to PulseAudio's human-readable description.
// Codes outside the known range yield a generic fallback string.
StringView pulse_audio_error_to_string(PulseAudioErrorCode code)
{
    bool const code_is_known = code >= PulseAudioErrorCode::OK && code < PulseAudioErrorCode::Sentinel;
    if (!code_is_known)
        return "Unknown error code"sv;
    auto const* description = pa_strerror(static_cast<int>(code));
    return StringView { description, strlen(description) };
}
// Blocks (on the main loop's condition variable) until the given PulseAudio
// operation completes, then checks that both the context and the stream are
// still healthy. Returns error_message as the error if they are not.
ErrorOr<void> PulseAudioStream::wait_for_operation(pa_operation* operation, StringView error_message)
{
    while (pa_operation_get_state(operation) == PA_OPERATION_RUNNING)
        m_context->wait_for_signal();
    // NOTE: Drop our reference on every exit path; previously the early error
    //       return below leaked the operation object.
    pa_operation_unref(operation);
    if (!m_context->connection_is_good() || !this->connection_is_good()) {
        auto pulse_audio_error_name = pulse_audio_error_to_string(m_context->get_last_error());
        warnln("Encountered stream error: {}", pulse_audio_error_name);
        return Error::from_string_view(error_message);
    }
    return {};
}
// Plays back all buffered audio, then corks the stream. No further data is
// written until resume() is called. A no-op if already suspended.
ErrorOr<void> PulseAudioStream::drain_and_suspend()
{
    auto main_loop_guard = m_context->main_loop_locker();
    if (m_suspended)
        return {};
    // Mark ourselves suspended first so the write callback stops running.
    m_suspended = true;
    // Already corked; there is nothing buffered left to drain.
    if (pa_stream_is_corked(m_stream) > 0)
        return {};
    TRY(wait_for_operation(pa_stream_drain(m_stream, STREAM_SIGNAL_CALLBACK(this)), "Draining PulseAudio stream failed"sv));
    TRY(wait_for_operation(pa_stream_cork(m_stream, 1, STREAM_SIGNAL_CALLBACK(this)), "Corking PulseAudio stream after drain failed"sv));
    return {};
}
// Discards all buffered audio, then corks the stream. No further data is
// written until resume() is called. A no-op if already suspended.
ErrorOr<void> PulseAudioStream::flush_and_suspend()
{
    auto main_loop_guard = m_context->main_loop_locker();
    if (m_suspended)
        return {};
    // Mark ourselves suspended first so the write callback stops running.
    m_suspended = true;
    // Already corked; there is nothing buffered left to flush.
    if (pa_stream_is_corked(m_stream) > 0)
        return {};
    TRY(wait_for_operation(pa_stream_flush(m_stream, STREAM_SIGNAL_CALLBACK(this)), "Flushing PulseAudio stream failed"sv));
    TRY(wait_for_operation(pa_stream_cork(m_stream, 1, STREAM_SIGNAL_CALLBACK(this)), "Corking PulseAudio stream after flush failed"sv));
    return {};
}
// Uncorks the stream and schedules an immediate buffer fill on the PulseAudio
// main loop so playback restarts right away instead of waiting for the next
// server-driven write request. A no-op if not suspended.
ErrorOr<void> PulseAudioStream::resume()
{
    auto locker = m_context->main_loop_locker();
    if (!m_suspended)
        return {};
    // Allow the write callback to run again before uncorking.
    m_suspended = false;
    TRY(wait_for_operation(pa_stream_cork(m_stream, 0, STREAM_SIGNAL_CALLBACK(this)), "Uncorking PulseAudio stream failed"sv));
    // Defer a write to the playback buffer on the PulseAudio main loop. Otherwise, playback will not
    // begin again, despite the fact that we uncorked.
    // NOTE: We ref here and then unref in the callback so that this stream will not be deleted until
    //       it finishes.
    ref();
    pa_mainloop_api_once(
        m_context->m_api, [](pa_mainloop_api*, void* user_data) {
            auto& stream = *static_cast<PulseAudioStream*>(user_data);
            // NOTE: writable_size() returns -1 in case of an error. However, the value is still safe
            //       since begin_write() will interpret -1 as a default parameter and choose a good size.
            auto bytes_to_write = pa_stream_writable_size(stream.m_stream);
            stream.on_write_requested(bytes_to_write);
            // Balances the ref() taken before scheduling this callback.
            stream.unref();
        },
        this);
    return {};
}
// Returns the total playback time of this stream as reported by PulseAudio's
// time smoother, or zero before playback has started / while no data exists.
ErrorOr<AK::Duration> PulseAudioStream::total_time_played()
{
    auto locker = m_context->main_loop_locker();
    // NOTE: This is a workaround for a PulseAudio issue. When a stream is started corked,
    //       the time smoother doesn't seem to be aware of it, so it will return the time
    //       since the stream was connected. Once the playback actually starts, the time
    //       resets back to zero. However, since we request monotonically-increasing time,
    //       this means that the smoother will register that it had a larger time before,
    //       and return that time instead, until we reach a timestamp greater than the
    //       last-returned time. If we never call pa_stream_get_time() until after giving
    //       the stream its first samples, the issue never occurs.
    if (!m_started_playback)
        return AK::Duration::zero();
    pa_usec_t time = 0;
    auto error = pa_stream_get_time(m_stream, &time);
    if (error == -PA_ERR_NODATA)
        return AK::Duration::zero();
    if (error != 0)
        return Error::from_string_literal("Failed to get time from PulseAudio stream");
    if (time > NumericLimits<i64>::max()) {
        warnln("WARNING: Audio time is too large!");
        // Saturate instead of subtracting the maximum (as before), which would have
        // snapped the reported time back to nearly zero and broken monotonicity.
        time = NumericLimits<i64>::max();
    }
    return AK::Duration::from_microseconds(static_cast<i64>(time));
}
// Sets the software volume of this stream's sink input. The linear volume
// (0.0 to 1.0) is converted to PulseAudio's scale and applied uniformly to
// every channel.
ErrorOr<void> PulseAudioStream::set_volume(double volume)
{
    auto main_loop_guard = m_context->main_loop_locker();
    auto stream_index = pa_stream_get_index(m_stream);
    if (stream_index == PA_INVALID_INDEX)
        return Error::from_string_literal("Failed to get PulseAudio stream index while setting volume");
    auto const pulse_volume = pa_sw_volume_from_linear(volume);
    pa_cvolume channel_volumes;
    pa_cvolume_set(&channel_volumes, channel_count(), pulse_volume);
    auto* operation = pa_context_set_sink_input_volume(m_context->m_context, stream_index, &channel_volumes, STREAM_SIGNAL_CALLBACK(this));
    return wait_for_operation(operation, "Failed to set PulseAudio stream volume"sv);
}
}

View file

@ -0,0 +1,185 @@
/*
* Copyright (c) 2023, Gregory Bertilson <zaggy1024@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include "Forward.h"
#include "PlaybackStream.h"
#include "SampleFormats.h"
#include <AK/AtomicRefCounted.h>
#include <AK/Error.h>
#include <AK/NonnullRefPtr.h>
#include <AK/Time.h>
#include <LibThreading/Thread.h>
#include <pulse/pulseaudio.h>
namespace Audio {
class PulseAudioStream;
// Strongly-typed mirror of PulseAudio's pa_context_state_t. Each enumerator is
// pinned to its PA_CONTEXT_* constant so the two can be converted with a cast.
enum class PulseAudioContextState {
    Unconnected = PA_CONTEXT_UNCONNECTED,
    Connecting = PA_CONTEXT_CONNECTING,
    Authorizing = PA_CONTEXT_AUTHORIZING,
    SettingName = PA_CONTEXT_SETTING_NAME,
    Ready = PA_CONTEXT_READY,
    Failed = PA_CONTEXT_FAILED,
    Terminated = PA_CONTEXT_TERMINATED,
};
enum class PulseAudioErrorCode;
using PulseAudioDataRequestCallback = Function<ReadonlyBytes(PulseAudioStream&, Bytes buffer, size_t sample_count)>;
// A wrapper around the PulseAudio main loop and context structs.
// Generally, only one instance of this should be needed for a single process.
class PulseAudioContext
    : public AtomicRefCounted<PulseAudioContext>
    , public Weakable<PulseAudioContext> {
public:
    // Returns a weak reference to the process-wide context, if one exists.
    static AK::WeakPtr<PulseAudioContext> weak_instance();
    // Returns the process-wide context, creating it if necessary.
    static ErrorOr<NonnullRefPtr<PulseAudioContext>> instance();

    explicit PulseAudioContext(pa_threaded_mainloop*, pa_mainloop_api*, pa_context*);
    PulseAudioContext(PulseAudioContext const& other) = delete;
    ~PulseAudioContext();

    bool current_thread_is_main_loop_thread();
    void lock_main_loop();
    void unlock_main_loop();
    // RAII helper: locks the main loop and unlocks it when the guard goes out of scope.
    [[nodiscard]] auto main_loop_locker()
    {
        lock_main_loop();
        return ScopeGuard([this]() { unlock_main_loop(); });
    }

    // Waits for signal_to_wake() to be called.
    // This must be called with the main loop locked.
    void wait_for_signal();
    // Wakes all threads that are blocked in wait_for_signal().
    void signal_to_wake();

    PulseAudioContextState get_connection_state();
    bool connection_is_good();
    // Returns the most recent error reported by the PulseAudio context.
    PulseAudioErrorCode get_last_error();

    // Creates a playback stream with the given parameters; write_callback is invoked
    // whenever the server requests more audio data.
    ErrorOr<NonnullRefPtr<PulseAudioStream>> create_stream(OutputState initial_state, u32 sample_rate, u8 channels, u32 target_latency_ms, PulseAudioDataRequestCallback write_callback);

private:
    friend class PulseAudioStream;
    pa_threaded_mainloop* m_main_loop { nullptr };
    pa_mainloop_api* m_api { nullptr };
    pa_context* m_context;
};
// Strongly-typed mirror of PulseAudio's pa_stream_state_t. Each enumerator is
// pinned to its PA_STREAM_* constant so the two can be converted with a cast.
enum class PulseAudioStreamState {
    Unconnected = PA_STREAM_UNCONNECTED,
    Creating = PA_STREAM_CREATING,
    Ready = PA_STREAM_READY,
    Failed = PA_STREAM_FAILED,
    Terminated = PA_STREAM_TERMINATED,
};
// A playback stream connected to a PulseAudio sink. Audio data is pulled from
// a user-provided write callback whenever the server requests more.
class PulseAudioStream : public AtomicRefCounted<PulseAudioStream> {
public:
    // Whether streams are created in the corked (paused) state.
    static constexpr bool start_corked = true;

    ~PulseAudioStream();

    // Returns the stream's current connection state.
    PulseAudioStreamState get_connection_state();
    // Returns true while the stream is in a usable (non-failed, non-terminated) state.
    bool connection_is_good();
    // Sets the callback to be run when the server consumes more of the buffer than
    // has been written yet.
    void set_underrun_callback(Function<void()>);

    // Sample rate in Hz.
    u32 sample_rate();
    // Size in bytes of one sample for a single channel.
    size_t sample_size();
    // Size in bytes of one frame (one sample for every channel).
    size_t frame_size();
    u8 channel_count();

    // Gets a data buffer that can be written to and then passed back to PulseAudio through
    // the write() function. This avoids a copy vs directly calling write().
    ErrorOr<Bytes> begin_write(size_t bytes_to_write = NumericLimits<size_t>::max());
    // Writes a data buffer to the playback stream.
    ErrorOr<void> write(ReadonlyBytes data);
    // Cancels the previous begin_write() call.
    ErrorOr<void> cancel_write();
    bool is_suspended() const;
    // Plays back all buffered data and corks the stream. Until resume() is called, no data
    // will be written to the stream.
    ErrorOr<void> drain_and_suspend();
    // Drops all buffered data and corks the stream. Until resume() is called, no data will
    // be written to the stream.
    ErrorOr<void> flush_and_suspend();
    // Uncorks the stream and forces data to be written to the buffers to force playback to
    // resume as soon as possible.
    ErrorOr<void> resume();
    // Total playback time as reported by PulseAudio's time smoother.
    ErrorOr<AK::Duration> total_time_played();
    // Sets the software volume (linear scale) on all channels of this stream's sink input.
    ErrorOr<void> set_volume(double volume);

    PulseAudioContext& context() { return *m_context; }

private:
    friend class PulseAudioContext;

    explicit PulseAudioStream(NonnullRefPtr<PulseAudioContext>&& context, pa_stream* stream)
        : m_context(context)
        , m_stream(stream)
    {
    }
    PulseAudioStream(PulseAudioStream const& other) = delete;

    // Blocks until the operation finishes, then checks context/stream health.
    ErrorOr<void> wait_for_operation(pa_operation*, StringView error_message);
    // Pulls data from m_write_callback and feeds it to the server.
    void on_write_requested(size_t bytes_to_write);

    NonnullRefPtr<PulseAudioContext> m_context;
    pa_stream* m_stream { nullptr };
    // Guards the pa_stream_get_time() workaround in total_time_played();
    // presumably set once the first samples are written — see the stream setup code.
    bool m_started_playback { false };
    PulseAudioDataRequestCallback m_write_callback { nullptr };
    // Determines whether we will allow the write callback to run. This should only be true
    // if the stream is becoming or is already corked.
    bool m_suspended { false };
    Function<void()> m_underrun_callback;
};
// Mirrors PulseAudio's error codes (PA_OK and the PA_ERR_* values from
// pulse/def.h); the enumerator order must stay in sync with PulseAudio.
enum class PulseAudioErrorCode {
    OK = 0,
    AccessFailure,
    UnknownCommand,
    InvalidArgument,
    EntityExists,
    NoSuchEntity,
    ConnectionRefused,
    ProtocolError,
    Timeout,
    NoAuthenticationKey,
    InternalError,
    ConnectionTerminated,
    EntityKilled,
    InvalidServer,
    ModuleInitFailed, // PA_ERR_MODINITFAILED
    // FIXME: "NoduleInitFailed" was a typo; kept as an alias for backwards
    //        compatibility. Use ModuleInitFailed instead.
    NoduleInitFailed = ModuleInitFailed,
    BadState,
    NoData,
    IncompatibleProtocolVersion,
    DataTooLarge,
    NotSupported,
    Unknown,
    NoExtension,
    Obsolete,
    NotImplemented,
    CalledFromFork,
    IOError,
    Busy,
    // One past the last valid error code (PA_ERR_MAX).
    Sentinel
};
StringView pulse_audio_error_to_string(PulseAudioErrorCode code);
}

View file

@ -0,0 +1,174 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <andreas@ladybird.org>
* Copyright (c) 2021, kleines Filmröllchen <filmroellchen@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Format.h>
#include <AK/Math.h>
namespace Audio {
// Pull AK's exponential helpers into the Audio namespace for the math below.
using AK::Exponentials::exp;
using AK::Exponentials::log;
// Constants for logarithmic volume. See Sample::linear_to_log
// Corresponds to 60dB
constexpr float DYNAMIC_RANGE = 1000;
// `a` in the volume curve `factor = a * exp(b * change)`.
constexpr float VOLUME_A = 1 / DYNAMIC_RANGE;
// `b` = ln(dynamic range); initialized at runtime since log() is not constexpr here.
// NOTE: namespace-scope `const` has internal linkage, so each TU gets its own copy.
float const VOLUME_B = log(DYNAMIC_RANGE);
// A single sample in an audio buffer.
// Values are floating point, and should range from -1.0 to +1.0
struct Sample {
    constexpr Sample() = default;

    // For mono
    constexpr explicit Sample(float left)
        : left(left)
        , right(left)
    {
    }

    // For stereo
    constexpr Sample(float left, float right)
        : left(left)
        , right(right)
    {
    }

    // Returns the absolute maximum range (separate per channel) of the given sample buffer.
    // For example { 0.8, 0 } means that samples on the left channel occupy the range { -0.8, 0.8 },
    // while all samples on the right channel are 0.
    static Sample max_range(ReadonlySpan<Sample> span)
    {
        // Seeded with the smallest positive normal float, so the result is never exactly zero
        // (e.g. for an empty span or an all-zero buffer).
        Sample result { NumericLimits<float>::min_normal(), NumericLimits<float>::min_normal() };
        for (Sample sample : span) {
            result.left = max(result.left, AK::fabs(sample.left));
            result.right = max(result.right, AK::fabs(sample.right));
        }
        return result;
    }

    // Clamps both channels into the valid [-1, 1] range.
    void clip()
    {
        if (left > 1)
            left = 1;
        else if (left < -1)
            left = -1;
        if (right > 1)
            right = 1;
        else if (right < -1)
            right = -1;
    }

    // Logarithmic scaling, as audio should ALWAYS do.
    // Reference: https://www.dr-lex.be/info-stuff/volumecontrols.html
    // We use the curve `factor = a * exp(b * change)`,
    // where change is the input fraction we want to change by,
    // a = 1/1000, b = ln(1000) = 6.908 and factor is the multiplier used.
    // The value 1000 represents the dynamic range in sound pressure, which corresponds to 60 dB(A).
    // This is a good dynamic range because it can represent all loudness values from
    // 30 dB(A) (barely hearable with background noise)
    // to 90 dB(A) (almost too loud to hear and about the reasonable limit of actual sound equipment).
    //
    // Format ranges:
    // - Linear:      0.0 to 1.0
    // - Logarithmic: 0.0 to 1.0
    ALWAYS_INLINE float linear_to_log(float const change) const
    {
        // TODO: Add linear slope around 0
        return VOLUME_A * exp(VOLUME_B * change);
    }

    // Inverse of linear_to_log().
    ALWAYS_INLINE float log_to_linear(float const val) const
    {
        // TODO: Add linear slope around 0
        return log(val / VOLUME_A) / VOLUME_B;
    }

    // Scales both channels in place by the logarithmic factor for `change`.
    ALWAYS_INLINE Sample& log_multiply(float const change)
    {
        float factor = linear_to_log(change);
        left *= factor;
        right *= factor;
        return *this;
    }

    // Returns a copy of this sample scaled by the logarithmic factor for `volume_change`.
    ALWAYS_INLINE Sample log_multiplied(float const volume_change) const
    {
        Sample new_frame { left, right };
        new_frame.log_multiply(volume_change);
        return new_frame;
    }

    // Constant power panning: position -1 maps to full left, +1 to full right,
    // and 0 attenuates both channels equally (by sqrt(2)/2).
    ALWAYS_INLINE Sample& pan(float const position)
    {
        float const pi_over_2 = AK::Pi<float> * 0.5f;
        float const root_over_2 = AK::sqrt<float>(2.0) * 0.5f;
        // Map position in [-1, 1] onto an angle in [-pi/4, pi/4].
        float const angle = position * pi_over_2 * 0.5f;
        float s, c;
        AK::sincos<float>(angle, s, c);
        left *= root_over_2 * (c - s);
        right *= root_over_2 * (c + s);
        return *this;
    }

    // Returns a copy of this sample panned to `position`.
    ALWAYS_INLINE Sample panned(float const position) const
    {
        Sample new_sample { left, right };
        new_sample.pan(position);
        return new_sample;
    }

    // Linear (non-logarithmic) gain on both channels.
    constexpr Sample& operator*=(float const mult)
    {
        left *= mult;
        right *= mult;
        return *this;
    }

    constexpr Sample operator*(float const mult) const
    {
        return { left * mult, right * mult };
    }

    // Mixes another sample into this one (per-channel addition).
    constexpr Sample& operator+=(Sample const& other)
    {
        left += other.left;
        right += other.right;
        return *this;
    }

    // Adds a DC offset to both channels.
    constexpr Sample& operator+=(float other)
    {
        left += other;
        right += other;
        return *this;
    }

    constexpr Sample operator+(Sample const& other) const
    {
        return { left + other.left, right + other.right };
    }

    float left { 0 };
    float right { 0 };
};
}
namespace AK {
// Formats an Audio::Sample as "[left, right]" for use with AK's format machinery.
template<>
struct Formatter<Audio::Sample> : Formatter<FormatString> {
    ErrorOr<void> format(FormatBuilder& builder, Audio::Sample const& value)
    {
        return Formatter<FormatString>::format(builder, "[{}, {}]"sv, value.left, value.right);
    }
};
}

View file

@ -0,0 +1,30 @@
/*
* Copyright (c) 2022, kleines Filmröllchen <filmroellchen@serenityos.org>.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include "SampleFormats.h"
namespace Audio {
// Returns the number of bits used to store a single (per-channel) sample in
// the given PCM sample format.
u16 pcm_bits_per_sample(PcmSampleFormat format)
{
    if (format == PcmSampleFormat::Uint8)
        return 8;
    if (format == PcmSampleFormat::Int16)
        return 16;
    if (format == PcmSampleFormat::Int24)
        return 24;
    if (format == PcmSampleFormat::Int32 || format == PcmSampleFormat::Float32)
        return 32;
    if (format == PcmSampleFormat::Float64)
        return 64;
    // All PcmSampleFormat values are handled above.
    VERIFY_NOT_REACHED();
}
}

View file

@ -0,0 +1,27 @@
/*
* Copyright (c) 2022, kleines Filmröllchen <filmroellchen@serenityos.org>.
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/ByteString.h>
#include <AK/Types.h>
namespace Audio {
// Supported PCM sample formats.
// Bit widths correspond to pcm_bits_per_sample() in SampleFormats.cpp.
enum class PcmSampleFormat : u8 {
    Uint8,   // unsigned 8-bit integer
    Int16,   // signed 16-bit integer
    Int24,   // signed 24-bit integer
    Int32,   // signed 32-bit integer
    Float32, // 32-bit floating point
    Float64, // 64-bit floating point
};
// Most of the read code only cares about how many bits to read or write
u16 pcm_bits_per_sample(PcmSampleFormat format);
}